2024-11-07 17:15:07,736 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-07 17:15:07,750 main DEBUG Took 0.011700 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-07 17:15:07,750 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-07 17:15:07,750 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-07 17:15:07,751 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-07 17:15:07,752 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 17:15:07,759 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-07 17:15:07,770 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 17:15:07,772 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 17:15:07,772 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 17:15:07,772 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 17:15:07,773 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 17:15:07,773 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 17:15:07,774 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 17:15:07,774 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 17:15:07,775 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 17:15:07,775 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 17:15:07,776 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 17:15:07,776 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 17:15:07,776 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 17:15:07,777 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-07 17:15:07,777 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 17:15:07,777 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 17:15:07,778 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 17:15:07,778 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 17:15:07,778 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 17:15:07,779 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 17:15:07,779 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 17:15:07,779 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 17:15:07,780 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 17:15:07,780 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 17:15:07,780 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 17:15:07,780 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-07 17:15:07,782 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 17:15:07,783 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-07 17:15:07,784 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-07 17:15:07,785 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-07 17:15:07,786 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-07 17:15:07,786 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-07 17:15:07,794 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-07 17:15:07,797 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-07 17:15:07,798 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-07 17:15:07,799 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-07 17:15:07,799 main DEBUG createAppenders(={Console}) 2024-11-07 17:15:07,800 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-07 17:15:07,800 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-07 17:15:07,800 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-07 17:15:07,801 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-07 17:15:07,801 main DEBUG OutputStream closed 2024-11-07 17:15:07,801 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-07 17:15:07,801 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-07 17:15:07,802 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-07 17:15:07,867 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-07 17:15:07,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-07 17:15:07,870 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-07 17:15:07,871 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-07 17:15:07,871 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-07 17:15:07,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-07 17:15:07,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-07 17:15:07,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-07 17:15:07,873 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-07 17:15:07,873 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-07 17:15:07,873 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-07 17:15:07,874 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-07 17:15:07,874 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-07 17:15:07,874 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-07 17:15:07,875 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-07 17:15:07,875 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-07 17:15:07,875 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-07 17:15:07,876 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-07 17:15:07,878 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-07 17:15:07,878 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-07 17:15:07,879 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-07 17:15:07,880 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-07T17:15:08,144 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192 2024-11-07 17:15:08,146 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-07 17:15:08,147 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-07T17:15:08,156 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins 2024-11-07T17:15:08,179 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T17:15:08,183 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/cluster_5c1c1720-808f-e015-ddc1-fd2a6dabdfc3, deleteOnExit=true 2024-11-07T17:15:08,184 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-07T17:15:08,185 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/test.cache.data in system properties and HBase conf 2024-11-07T17:15:08,185 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/hadoop.tmp.dir in system properties and HBase conf 2024-11-07T17:15:08,186 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/hadoop.log.dir in system properties and HBase conf 2024-11-07T17:15:08,187 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T17:15:08,188 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T17:15:08,188 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-07T17:15:08,281 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-07T17:15:08,370 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-07T17:15:08,374 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T17:15:08,375 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T17:15:08,376 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T17:15:08,376 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T17:15:08,377 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T17:15:08,377 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T17:15:08,378 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T17:15:08,378 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T17:15:08,379 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T17:15:08,379 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/nfs.dump.dir in system properties and HBase conf 2024-11-07T17:15:08,380 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/java.io.tmpdir in system properties and HBase conf 2024-11-07T17:15:08,380 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T17:15:08,380 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T17:15:08,381 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T17:15:09,364 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-07T17:15:09,456 INFO [Time-limited test {}] log.Log(170): Logging initialized @2408ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-07T17:15:09,549 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T17:15:09,616 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T17:15:09,640 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T17:15:09,640 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T17:15:09,642 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T17:15:09,657 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T17:15:09,660 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/hadoop.log.dir/,AVAILABLE} 2024-11-07T17:15:09,661 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T17:15:09,867 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/java.io.tmpdir/jetty-localhost-36835-hadoop-hdfs-3_4_1-tests_jar-_-any-3079853546898603197/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T17:15:09,877 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:36835} 2024-11-07T17:15:09,878 INFO [Time-limited test {}] server.Server(415): Started @2830ms 2024-11-07T17:15:10,284 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T17:15:10,291 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T17:15:10,292 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T17:15:10,292 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T17:15:10,293 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T17:15:10,294 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/hadoop.log.dir/,AVAILABLE} 2024-11-07T17:15:10,294 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T17:15:10,415 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/java.io.tmpdir/jetty-localhost-35775-hadoop-hdfs-3_4_1-tests_jar-_-any-11364636624361061995/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T17:15:10,415 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:35775} 2024-11-07T17:15:10,416 INFO [Time-limited test {}] server.Server(415): Started @3368ms 2024-11-07T17:15:10,472 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T17:15:10,961 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/cluster_5c1c1720-808f-e015-ddc1-fd2a6dabdfc3/dfs/data/data2/current/BP-2016808812-172.17.0.2-1730999709066/current, will proceed with Du for space computation calculation, 2024-11-07T17:15:10,961 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/cluster_5c1c1720-808f-e015-ddc1-fd2a6dabdfc3/dfs/data/data1/current/BP-2016808812-172.17.0.2-1730999709066/current, will proceed with Du for space computation calculation, 2024-11-07T17:15:11,000 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T17:15:11,053 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7b2f503f23ccafa1 with lease ID 0x138548f62e2239b: Processing first storage report for DS-e09504c5-e6c3-4f4b-baa1-f4575542ab61 from datanode DatanodeRegistration(127.0.0.1:33051, datanodeUuid=5e212fae-8090-491e-bac2-98c16dabef77, infoPort=39725, infoSecurePort=0, ipcPort=46473, storageInfo=lv=-57;cid=testClusterID;nsid=1792146345;c=1730999709066) 2024-11-07T17:15:11,054 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7b2f503f23ccafa1 with lease ID 0x138548f62e2239b: from storage DS-e09504c5-e6c3-4f4b-baa1-f4575542ab61 node DatanodeRegistration(127.0.0.1:33051, datanodeUuid=5e212fae-8090-491e-bac2-98c16dabef77, infoPort=39725, infoSecurePort=0, ipcPort=46473, storageInfo=lv=-57;cid=testClusterID;nsid=1792146345;c=1730999709066), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-07T17:15:11,055 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7b2f503f23ccafa1 with lease ID 0x138548f62e2239b: Processing first storage report for DS-916f898f-ac8f-4f10-8989-94b61bc9458b from datanode DatanodeRegistration(127.0.0.1:33051, datanodeUuid=5e212fae-8090-491e-bac2-98c16dabef77, infoPort=39725, infoSecurePort=0, ipcPort=46473, storageInfo=lv=-57;cid=testClusterID;nsid=1792146345;c=1730999709066) 2024-11-07T17:15:11,055 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7b2f503f23ccafa1 with lease ID 0x138548f62e2239b: from storage DS-916f898f-ac8f-4f10-8989-94b61bc9458b node DatanodeRegistration(127.0.0.1:33051, datanodeUuid=5e212fae-8090-491e-bac2-98c16dabef77, infoPort=39725, infoSecurePort=0, ipcPort=46473, storageInfo=lv=-57;cid=testClusterID;nsid=1792146345;c=1730999709066), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T17:15:11,146 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192 
2024-11-07T17:15:11,221 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/cluster_5c1c1720-808f-e015-ddc1-fd2a6dabdfc3/zookeeper_0, clientPort=64938, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/cluster_5c1c1720-808f-e015-ddc1-fd2a6dabdfc3/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/cluster_5c1c1720-808f-e015-ddc1-fd2a6dabdfc3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T17:15:11,231 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=64938 2024-11-07T17:15:11,246 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T17:15:11,250 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T17:15:11,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741825_1001 (size=7) 2024-11-07T17:15:11,894 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17 with version=8 2024-11-07T17:15:11,895 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/hbase-staging 2024-11-07T17:15:12,023 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-07T17:15:12,291 INFO [Time-limited test {}] client.ConnectionUtils(129): master/3a0fde618c86:0 server-side Connection retries=45 2024-11-07T17:15:12,311 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T17:15:12,311 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T17:15:12,311 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T17:15:12,312 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T17:15:12,312 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T17:15:12,444 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T17:15:12,504 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-07T17:15:12,513 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-07T17:15:12,517 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T17:15:12,544 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 4590 (auto-detected) 2024-11-07T17:15:12,545 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-07T17:15:12,564 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35383 2024-11-07T17:15:12,572 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T17:15:12,574 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T17:15:12,586 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35383 connecting to ZooKeeper ensemble=127.0.0.1:64938 2024-11-07T17:15:12,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:353830x0, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T17:15:12,622 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35383-0x10183baeb4b0000 connected 2024-11-07T17:15:12,650 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T17:15:12,653 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T17:15:12,656 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T17:15:12,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35383 2024-11-07T17:15:12,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35383 2024-11-07T17:15:12,662 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35383 2024-11-07T17:15:12,662 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35383 2024-11-07T17:15:12,662 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35383 
2024-11-07T17:15:12,670 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17, hbase.cluster.distributed=false 2024-11-07T17:15:12,735 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/3a0fde618c86:0 server-side Connection retries=45 2024-11-07T17:15:12,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T17:15:12,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T17:15:12,736 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T17:15:12,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T17:15:12,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T17:15:12,739 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T17:15:12,741 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T17:15:12,742 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:37403 2024-11-07T17:15:12,744 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T17:15:12,750 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T17:15:12,751 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T17:15:12,754 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T17:15:12,758 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:37403 connecting to ZooKeeper ensemble=127.0.0.1:64938 2024-11-07T17:15:12,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:374030x0, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T17:15:12,762 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:374030x0, quorum=127.0.0.1:64938, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T17:15:12,763 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37403-0x10183baeb4b0001 connected 2024-11-07T17:15:12,764 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T17:15:12,765 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T17:15:12,767 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37403 2024-11-07T17:15:12,768 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37403 2024-11-07T17:15:12,768 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37403 2024-11-07T17:15:12,770 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37403 2024-11-07T17:15:12,770 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37403 2024-11-07T17:15:12,772 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/3a0fde618c86,35383,1730999712016 2024-11-07T17:15:12,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T17:15:12,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T17:15:12,781 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3a0fde618c86,35383,1730999712016 2024-11-07T17:15:12,791 DEBUG [M:0;3a0fde618c86:35383 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3a0fde618c86:35383 2024-11-07T17:15:12,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T17:15:12,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T17:15:12,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:15:12,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:15:12,805 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T17:15:12,806 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] 
master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3a0fde618c86,35383,1730999712016 from backup master directory 2024-11-07T17:15:12,806 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T17:15:12,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3a0fde618c86,35383,1730999712016 2024-11-07T17:15:12,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T17:15:12,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T17:15:12,809 WARN [master/3a0fde618c86:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T17:15:12,810 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3a0fde618c86,35383,1730999712016 2024-11-07T17:15:12,811 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-07T17:15:12,813 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-07T17:15:12,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741826_1002 (size=42) 2024-11-07T17:15:13,281 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/hbase.id with ID: 7d665ca7-da52-4207-be86-8f21042e57d1 2024-11-07T17:15:13,325 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T17:15:13,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:15:13,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:15:13,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741827_1003 (size=196) 2024-11-07T17:15:13,784 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T17:15:13,787 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T17:15:13,804 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:13,808 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-07T17:15:13,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741828_1004 (size=1189) 2024-11-07T17:15:14,256 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store 2024-11-07T17:15:14,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741829_1005 (size=34) 2024-11-07T17:15:14,676 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-07T17:15:14,676 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:15:14,677 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T17:15:14,677 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T17:15:14,678 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T17:15:14,678 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T17:15:14,678 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T17:15:14,678 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T17:15:14,678 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-07T17:15:14,680 WARN [master/3a0fde618c86:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/.initializing 2024-11-07T17:15:14,680 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/WALs/3a0fde618c86,35383,1730999712016 2024-11-07T17:15:14,686 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-07T17:15:14,697 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3a0fde618c86%2C35383%2C1730999712016, suffix=, logDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/WALs/3a0fde618c86,35383,1730999712016, archiveDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/oldWALs, maxLogs=10 2024-11-07T17:15:14,718 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(600): When create output stream for /user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/WALs/3a0fde618c86,35383,1730999712016/3a0fde618c86%2C35383%2C1730999712016.1730999714701, exclude list is [], retry=0 2024-11-07T17:15:14,735 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33051,DS-e09504c5-e6c3-4f4b-baa1-f4575542ab61,DISK] 2024-11-07T17:15:14,738 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-07T17:15:14,773 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/WALs/3a0fde618c86,35383,1730999712016/3a0fde618c86%2C35383%2C1730999712016.1730999714701 2024-11-07T17:15:14,774 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39725:39725)] 2024-11-07T17:15:14,775 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T17:15:14,775 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:15:14,778 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T17:15:14,779 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T17:15:14,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T17:15:14,839 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T17:15:14,843 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:14,846 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T17:15:14,846 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T17:15:14,849 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T17:15:14,849 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:14,850 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:15:14,851 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T17:15:14,853 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T17:15:14,853 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:14,854 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:15:14,855 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T17:15:14,857 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T17:15:14,857 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:14,858 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:15:14,862 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T17:15:14,863 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T17:15:14,871 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T17:15:14,874 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T17:15:14,879 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T17:15:14,880 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67852317, jitterRate=0.011078312993049622}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T17:15:14,884 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-07T17:15:14,885 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T17:15:14,913 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29c953e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:14,947 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
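The numbers logged just above are internally consistent: FlushLargeStoresPolicy falls back to the region's flush size divided by the number of families (134217728 bytes / 4 families = 33554432 bytes = 32 MB, matching flushSizeLowerBound), and desiredMaxFileSize=67852317 looks like a 64 MB base scaled by (1 + jitterRate). The multiplicative-jitter reading is an inference from these values, not taken from HBase source; a small sketch checking the arithmetic:

```java
// Sanity-check the values logged above for the master local store region (families: info, proc, rs, state).
// Assumption (mine): split-size jitter is applied as base * (1 + jitterRate); it reproduces the logged value.
public class LoggedValueCheck {
    public static void main(String[] args) {
        long flushSize = 134_217_728L;           // flushSize logged by MasterRegionFlusherAndCompactor
        int families = 4;                        // info, proc, rs, state
        long lowerBound = flushSize / families;
        System.out.println(lowerBound);          // 33554432, i.e. FlushLargeStoresPolicy{flushSizeLowerBound=33554432}

        long base = 64L * 1024 * 1024;           // 67108864; assumed 64 MB base split size
        double jitterRate = 0.011078312993049622;
        long desiredMaxFileSize = Math.round(base * (1 + jitterRate));
        System.out.println(desiredMaxFileSize);  // 67852317, matching ConstantSizeRegionSplitPolicy above
    }
}
```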
2024-11-07T17:15:14,958 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T17:15:14,958 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T17:15:14,960 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T17:15:14,962 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-07T17:15:14,966 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-11-07T17:15:14,966 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T17:15:14,990 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T17:15:15,001 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T17:15:15,003 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-07T17:15:15,005 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T17:15:15,006 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T17:15:15,008 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-07T17:15:15,010 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T17:15:15,013 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T17:15:15,014 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-07T17:15:15,015 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-07T17:15:15,017 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T17:15:15,026 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T17:15:15,027 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T17:15:15,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T17:15:15,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T17:15:15,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:15:15,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:15:15,031 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=3a0fde618c86,35383,1730999712016, sessionid=0x10183baeb4b0000, setting cluster-up flag (Was=false) 2024-11-07T17:15:15,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:15:15,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:15:15,048 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T17:15:15,049 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3a0fde618c86,35383,1730999712016 2024-11-07T17:15:15,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:15:15,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:15:15,059 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T17:15:15,060 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3a0fde618c86,35383,1730999712016 2024-11-07T17:15:15,086 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3a0fde618c86:37403 2024-11-07T17:15:15,087 INFO 
[RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1008): ClusterId : 7d665ca7-da52-4207-be86-8f21042e57d1 2024-11-07T17:15:15,090 DEBUG [RS:0;3a0fde618c86:37403 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T17:15:15,094 DEBUG [RS:0;3a0fde618c86:37403 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T17:15:15,094 DEBUG [RS:0;3a0fde618c86:37403 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T17:15:15,097 DEBUG [RS:0;3a0fde618c86:37403 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T17:15:15,098 DEBUG [RS:0;3a0fde618c86:37403 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77ae34e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:15,099 DEBUG [RS:0;3a0fde618c86:37403 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66016681, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3a0fde618c86/172.17.0.2:0 2024-11-07T17:15:15,102 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-07T17:15:15,102 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-07T17:15:15,102 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-07T17:15:15,104 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(3073): reportForDuty to master=3a0fde618c86,35383,1730999712016 with isa=3a0fde618c86/172.17.0.2:37403, startcode=1730999712734 2024-11-07T17:15:15,116 DEBUG [RS:0;3a0fde618c86:37403 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T17:15:15,141 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-07T17:15:15,150 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-07T17:15:15,151 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33853, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T17:15:15,154 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
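The StochasticLoadBalancer entry above reports maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800 and maxRunningTime=30000. These correspond to stochastic-balancer tuning knobs; the property names in the sketch below are my assumption of the hbase-site.xml keys and should be verified against the HBase reference guide before use:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch: expressing the balancer values logged above as configuration overrides.
// Key names are assumed stochastic-balancer properties; verify them before relying on this.
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);    // maxSteps=1000000
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);    // stepsPerRegion=800
        conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000); // maxRunningTime=30000
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false); // runMaxSteps=false
        System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
    }
}
```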
2024-11-07T17:15:15,159 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35383 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:15,163 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3a0fde618c86,35383,1730999712016 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T17:15:15,166 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3a0fde618c86:0, corePoolSize=5, maxPoolSize=5 2024-11-07T17:15:15,167 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3a0fde618c86:0, corePoolSize=5, maxPoolSize=5 2024-11-07T17:15:15,167 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3a0fde618c86:0, corePoolSize=5, maxPoolSize=5 2024-11-07T17:15:15,167 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3a0fde618c86:0, corePoolSize=5, maxPoolSize=5 2024-11-07T17:15:15,168 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3a0fde618c86:0, corePoolSize=10, maxPoolSize=10 2024-11-07T17:15:15,168 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3a0fde618c86:0, corePoolSize=1, maxPoolSize=1 2024-11-07T17:15:15,168 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3a0fde618c86:0, corePoolSize=2, maxPoolSize=2 2024-11-07T17:15:15,168 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3a0fde618c86:0, corePoolSize=1, maxPoolSize=1 2024-11-07T17:15:15,171 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730999745171 2024-11-07T17:15:15,173 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner 
Cleaner pool size is 1 2024-11-07T17:15:15,174 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-07T17:15:15,175 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-07T17:15:15,176 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-07T17:15:15,178 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T17:15:15,178 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T17:15:15,179 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T17:15:15,179 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T17:15:15,180 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T17:15:15,182 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:15,182 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T17:15:15,183 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T17:15:15,184 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T17:15:15,185 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T17:15:15,187 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T17:15:15,187 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T17:15:15,192 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3a0fde618c86:0:becomeActiveMaster-HFileCleaner.large.0-1730999715189,5,FailOnTimeoutGroup] 2024-11-07T17:15:15,192 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3a0fde618c86:0:becomeActiveMaster-HFileCleaner.small.0-1730999715192,5,FailOnTimeoutGroup] 2024-11-07T17:15:15,192 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T17:15:15,193 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-07T17:15:15,194 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T17:15:15,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741831_1007 (size=1039) 2024-11-07T17:15:15,194 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-07T17:15:15,195 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-11-07T17:15:15,195 WARN [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 
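At this point the region server's first reportForDuty is rejected with ServerNotRunningYetException (see the stack trace above), so it logs "reportForDuty failed; sleeping 100 ms and then retrying" and succeeds on the next attempt at 17:15:15,296. A minimal, generic sketch of that retry-with-fixed-sleep pattern follows; it is an illustration only, not HBase's actual HRegionServer code:

```java
import java.util.concurrent.Callable;

// Generic retry-with-fixed-sleep pattern, as an analogy to the
// "reportForDuty failed; sleeping 100 ms and then retrying" behaviour above.
// Not HBase code; names and structure are mine.
public final class RetryWithSleep {
    public static <T> T retry(Callable<T> call, long sleepMs, int maxAttempts) throws Exception {
        Exception last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return call.call();
            } catch (Exception e) {            // e.g. "Server is not running yet" in the log above
                last = e;
                Thread.sleep(sleepMs);         // the log shows a 100 ms pause between attempts
            }
        }
        throw last;
    }

    public static void main(String[] args) throws Exception {
        long start = System.currentTimeMillis();
        String result = retry(() -> {
            if (System.currentTimeMillis() - start < 300) {
                throw new IllegalStateException("Server is not running yet");
            }
            return "registered";
        }, 100, 10);
        System.out.println(result);            // prints "registered" after a few failed attempts
    }
}
```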
2024-11-07T17:15:15,296 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(3073): reportForDuty to master=3a0fde618c86,35383,1730999712016 with isa=3a0fde618c86/172.17.0.2:37403, startcode=1730999712734 2024-11-07T17:15:15,298 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35383 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:15,301 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35383 {}] master.ServerManager(486): Registering regionserver=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:15,309 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17 2024-11-07T17:15:15,309 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:39903 2024-11-07T17:15:15,309 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-07T17:15:15,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T17:15:15,313 DEBUG [RS:0;3a0fde618c86:37403 {}] zookeeper.ZKUtil(111): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3a0fde618c86,37403,1730999712734 2024-11-07T17:15:15,314 WARN [RS:0;3a0fde618c86:37403 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T17:15:15,314 INFO [RS:0;3a0fde618c86:37403 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-07T17:15:15,314 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/WALs/3a0fde618c86,37403,1730999712734 2024-11-07T17:15:15,315 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3a0fde618c86,37403,1730999712734] 2024-11-07T17:15:15,327 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-07T17:15:15,338 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T17:15:15,352 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T17:15:15,354 INFO [RS:0;3a0fde618c86:37403 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T17:15:15,355 INFO [RS:0;3a0fde618c86:37403 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
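The MemStoreFlusher entry above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) is consistent with a 0.95 low-water-mark factor; the factor itself is inferred from the two logged values rather than read from configuration:

```java
// Quick check of the MemStoreFlusher numbers logged above.
// Assumption (mine): the low-water mark is 0.95 of the global limit; 880 * 0.95 = 836 matches the log.
public class MemStoreLimitCheck {
    public static void main(String[] args) {
        double globalLimitMb = 880.0;              // globalMemStoreLimit=880 M
        double lowerMarkMb = globalLimitMb * 0.95; // assumed lower-limit factor
        System.out.println(lowerMarkMb);           // 836.0, matching globalMemStoreLimitLowMark=836 M
    }
}
```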
2024-11-07T17:15:15,356 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-07T17:15:15,363 INFO [RS:0;3a0fde618c86:37403 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-07T17:15:15,363 DEBUG [RS:0;3a0fde618c86:37403 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3a0fde618c86:0, corePoolSize=1, maxPoolSize=1 2024-11-07T17:15:15,363 DEBUG [RS:0;3a0fde618c86:37403 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3a0fde618c86:0, corePoolSize=1, maxPoolSize=1 2024-11-07T17:15:15,363 DEBUG [RS:0;3a0fde618c86:37403 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0, corePoolSize=1, maxPoolSize=1 2024-11-07T17:15:15,363 DEBUG [RS:0;3a0fde618c86:37403 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3a0fde618c86:0, corePoolSize=1, maxPoolSize=1 2024-11-07T17:15:15,364 DEBUG [RS:0;3a0fde618c86:37403 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3a0fde618c86:0, corePoolSize=1, maxPoolSize=1 2024-11-07T17:15:15,364 DEBUG [RS:0;3a0fde618c86:37403 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3a0fde618c86:0, corePoolSize=2, maxPoolSize=2 2024-11-07T17:15:15,364 DEBUG [RS:0;3a0fde618c86:37403 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3a0fde618c86:0, corePoolSize=1, maxPoolSize=1 2024-11-07T17:15:15,364 DEBUG [RS:0;3a0fde618c86:37403 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3a0fde618c86:0, corePoolSize=1, maxPoolSize=1 2024-11-07T17:15:15,364 DEBUG [RS:0;3a0fde618c86:37403 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3a0fde618c86:0, corePoolSize=1, maxPoolSize=1 2024-11-07T17:15:15,365 DEBUG [RS:0;3a0fde618c86:37403 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3a0fde618c86:0, corePoolSize=1, maxPoolSize=1 2024-11-07T17:15:15,365 DEBUG [RS:0;3a0fde618c86:37403 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3a0fde618c86:0, corePoolSize=1, maxPoolSize=1 2024-11-07T17:15:15,365 DEBUG [RS:0;3a0fde618c86:37403 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3a0fde618c86:0, corePoolSize=3, maxPoolSize=3 2024-11-07T17:15:15,366 DEBUG [RS:0;3a0fde618c86:37403 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0, corePoolSize=3, maxPoolSize=3 2024-11-07T17:15:15,366 INFO [RS:0;3a0fde618c86:37403 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T17:15:15,367 INFO [RS:0;3a0fde618c86:37403 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T17:15:15,367 INFO [RS:0;3a0fde618c86:37403 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
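The ScheduledChore entries above (CompactionChecker and MemstoreFlusherChore every 1000 ms, nonceCleaner every 360000 ms, and so on) are fixed-period background tasks run by HBase's ChoreService. Purely as an analogy, and not HBase's own scheduling code, the equivalent JDK pattern is:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Analogy only: the ScheduledChore entries above are fixed-period background tasks.
// HBase uses its own ChoreService; this sketch shows the equivalent JDK scheduling.
public class PeriodicTaskSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        // e.g. CompactionChecker: period=1000, unit=MILLISECONDS in the log above
        pool.scheduleAtFixedRate(() -> System.out.println("check compactions"), 0, 1000, TimeUnit.MILLISECONDS);
        Thread.sleep(3_500);
        pool.shutdown();
    }
}
```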
2024-11-07T17:15:15,367 INFO [RS:0;3a0fde618c86:37403 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T17:15:15,367 INFO [RS:0;3a0fde618c86:37403 {}] hbase.ChoreService(168): Chore ScheduledChore name=3a0fde618c86,37403,1730999712734-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T17:15:15,387 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T17:15:15,388 INFO [RS:0;3a0fde618c86:37403 {}] hbase.ChoreService(168): Chore ScheduledChore name=3a0fde618c86,37403,1730999712734-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T17:15:15,407 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.Replication(204): 3a0fde618c86,37403,1730999712734 started 2024-11-07T17:15:15,407 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1767): Serving as 3a0fde618c86,37403,1730999712734, RpcServer on 3a0fde618c86/172.17.0.2:37403, sessionid=0x10183baeb4b0001 2024-11-07T17:15:15,408 DEBUG [RS:0;3a0fde618c86:37403 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T17:15:15,408 DEBUG [RS:0;3a0fde618c86:37403 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:15,408 DEBUG [RS:0;3a0fde618c86:37403 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3a0fde618c86,37403,1730999712734' 2024-11-07T17:15:15,408 DEBUG [RS:0;3a0fde618c86:37403 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T17:15:15,409 DEBUG [RS:0;3a0fde618c86:37403 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T17:15:15,409 DEBUG [RS:0;3a0fde618c86:37403 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T17:15:15,410 DEBUG [RS:0;3a0fde618c86:37403 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T17:15:15,410 DEBUG [RS:0;3a0fde618c86:37403 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:15,410 DEBUG [RS:0;3a0fde618c86:37403 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3a0fde618c86,37403,1730999712734' 2024-11-07T17:15:15,410 DEBUG [RS:0;3a0fde618c86:37403 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T17:15:15,410 DEBUG [RS:0;3a0fde618c86:37403 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T17:15:15,411 DEBUG [RS:0;3a0fde618c86:37403 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T17:15:15,411 INFO [RS:0;3a0fde618c86:37403 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T17:15:15,411 INFO [RS:0;3a0fde618c86:37403 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-07T17:15:15,516 INFO [RS:0;3a0fde618c86:37403 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-07T17:15:15,520 INFO [RS:0;3a0fde618c86:37403 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3a0fde618c86%2C37403%2C1730999712734, suffix=, logDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/WALs/3a0fde618c86,37403,1730999712734, archiveDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/oldWALs, maxLogs=32 2024-11-07T17:15:15,536 DEBUG [RS:0;3a0fde618c86:37403 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(600): When create output stream for /user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/WALs/3a0fde618c86,37403,1730999712734/3a0fde618c86%2C37403%2C1730999712734.1730999715522, exclude list is [], retry=0 2024-11-07T17:15:15,541 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33051,DS-e09504c5-e6c3-4f4b-baa1-f4575542ab61,DISK] 2024-11-07T17:15:15,544 INFO [RS:0;3a0fde618c86:37403 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/WALs/3a0fde618c86,37403,1730999712734/3a0fde618c86%2C37403%2C1730999712734.1730999715522 2024-11-07T17:15:15,545 DEBUG [RS:0;3a0fde618c86:37403 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39725:39725)] 2024-11-07T17:15:15,597 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-07T17:15:15,597 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17 2024-11-07T17:15:15,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741833_1009 (size=32) 2024-11-07T17:15:16,008 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:15:16,011 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T17:15:16,014 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T17:15:16,014 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:16,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T17:15:16,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T17:15:16,018 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T17:15:16,018 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:16,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T17:15:16,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T17:15:16,021 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T17:15:16,021 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:16,022 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T17:15:16,023 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740 2024-11-07T17:15:16,024 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740 2024-11-07T17:15:16,027 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
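The hbase:meta descriptor logged earlier (families info, rep_barrier and table; BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE 8 KB for info) is built internally by InitMetaProcedure. The sketch below only shows how an equivalent family definition would be expressed with the public HBase 2.x client builders, for a hypothetical user table named "example" rather than for hbase:meta itself:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: a user-level descriptor mirroring the attributes of the 'info' family logged above
// (3 versions, ROWCOL bloom filter, in-memory, ROW_INDEX_V1 encoding, 8 KB blocks).
// "example" is a hypothetical table; hbase:meta is created by the master, not by client code.
public class MetaLikeDescriptorSketch {
    public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build())
            .build();
    }

    public static void main(String[] args) {
        System.out.println(build());
    }
}
```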
2024-11-07T17:15:16,029 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-07T17:15:16,033 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T17:15:16,034 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74985391, jitterRate=0.1173693984746933}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T17:15:16,036 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-07T17:15:16,036 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-07T17:15:16,036 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-07T17:15:16,036 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-07T17:15:16,036 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T17:15:16,036 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T17:15:16,038 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-07T17:15:16,038 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-07T17:15:16,040 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-07T17:15:16,041 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-07T17:15:16,046 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T17:15:16,054 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T17:15:16,056 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T17:15:16,208 DEBUG [3a0fde618c86:35383 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-07T17:15:16,212 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:16,217 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3a0fde618c86,37403,1730999712734, state=OPENING 2024-11-07T17:15:16,224 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T17:15:16,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:15:16,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:15:16,226 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T17:15:16,226 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T17:15:16,228 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:15:16,401 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:16,403 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T17:15:16,406 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51404, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T17:15:16,418 INFO [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-07T17:15:16,418 INFO [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-07T17:15:16,419 INFO [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-07T17:15:16,422 INFO [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3a0fde618c86%2C37403%2C1730999712734.meta, suffix=.meta, logDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/WALs/3a0fde618c86,37403,1730999712734, archiveDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/oldWALs, maxLogs=32 2024-11-07T17:15:16,438 DEBUG [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(600): When create output stream for /user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/WALs/3a0fde618c86,37403,1730999712734/3a0fde618c86%2C37403%2C1730999712734.meta.1730999716423.meta, exclude list is [], retry=0 2024-11-07T17:15:16,442 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33051,DS-e09504c5-e6c3-4f4b-baa1-f4575542ab61,DISK] 2024-11-07T17:15:16,445 INFO [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/WALs/3a0fde618c86,37403,1730999712734/3a0fde618c86%2C37403%2C1730999712734.meta.1730999716423.meta 2024-11-07T17:15:16,445 DEBUG [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:39725:39725)] 2024-11-07T17:15:16,445 DEBUG [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T17:15:16,447 DEBUG [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T17:15:16,506 DEBUG [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T17:15:16,511 INFO [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-07T17:15:16,515 DEBUG [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T17:15:16,515 DEBUG [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:15:16,515 DEBUG [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-07T17:15:16,515 DEBUG [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-07T17:15:16,519 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T17:15:16,520 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T17:15:16,520 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:16,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T17:15:16,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T17:15:16,523 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T17:15:16,523 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:16,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T17:15:16,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T17:15:16,526 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T17:15:16,526 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:16,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T17:15:16,529 DEBUG [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740 2024-11-07T17:15:16,531 DEBUG [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740 2024-11-07T17:15:16,534 DEBUG [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T17:15:16,536 DEBUG [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-07T17:15:16,538 INFO [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64967906, jitterRate=-0.03190276026725769}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T17:15:16,539 DEBUG [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-07T17:15:16,546 INFO [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730999716396 2024-11-07T17:15:16,557 DEBUG [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T17:15:16,558 INFO [RS_OPEN_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-07T17:15:16,559 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:16,561 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3a0fde618c86,37403,1730999712734, state=OPEN 2024-11-07T17:15:16,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T17:15:16,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T17:15:16,566 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T17:15:16,566 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T17:15:16,570 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T17:15:16,570 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=3a0fde618c86,37403,1730999712734 in 338 msec 2024-11-07T17:15:16,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T17:15:16,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 525 msec 2024-11-07T17:15:16,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.4860 sec 2024-11-07T17:15:16,582 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730999716582, completionTime=-1 2024-11-07T17:15:16,582 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T17:15:16,582 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-07T17:15:16,619 DEBUG [hconnection-0x3b59e39d-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:16,621 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51420, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:16,632 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-07T17:15:16,632 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730999776632 2024-11-07T17:15:16,632 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730999836632 2024-11-07T17:15:16,632 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 50 msec 2024-11-07T17:15:16,652 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3a0fde618c86,35383,1730999712016-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T17:15:16,653 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3a0fde618c86,35383,1730999712016-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T17:15:16,653 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3a0fde618c86,35383,1730999712016-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T17:15:16,654 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3a0fde618c86:35383, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T17:15:16,655 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T17:15:16,660 DEBUG [master/3a0fde618c86:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-07T17:15:16,662 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-07T17:15:16,664 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T17:15:16,670 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-07T17:15:16,672 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T17:15:16,673 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:16,675 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T17:15:16,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741835_1011 (size=358) 2024-11-07T17:15:17,090 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3c877d3c3f531453d06f6bdf82c5263b, NAME => 'hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17 2024-11-07T17:15:17,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741836_1012 (size=42) 2024-11-07T17:15:17,500 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:15:17,501 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 3c877d3c3f531453d06f6bdf82c5263b, disabling compactions & flushes 2024-11-07T17:15:17,501 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b. 2024-11-07T17:15:17,501 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b. 2024-11-07T17:15:17,501 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b. 
after waiting 0 ms 2024-11-07T17:15:17,501 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b. 2024-11-07T17:15:17,501 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b. 2024-11-07T17:15:17,501 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3c877d3c3f531453d06f6bdf82c5263b: 2024-11-07T17:15:17,503 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T17:15:17,510 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1730999717504"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730999717504"}]},"ts":"1730999717504"} 2024-11-07T17:15:17,534 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-07T17:15:17,536 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T17:15:17,538 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999717536"}]},"ts":"1730999717536"} 2024-11-07T17:15:17,543 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-07T17:15:17,548 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=3c877d3c3f531453d06f6bdf82c5263b, ASSIGN}] 2024-11-07T17:15:17,550 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=3c877d3c3f531453d06f6bdf82c5263b, ASSIGN 2024-11-07T17:15:17,552 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=3c877d3c3f531453d06f6bdf82c5263b, ASSIGN; state=OFFLINE, location=3a0fde618c86,37403,1730999712734; forceNewPlan=false, retain=false 2024-11-07T17:15:17,703 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=3c877d3c3f531453d06f6bdf82c5263b, regionState=OPENING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:17,707 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 3c877d3c3f531453d06f6bdf82c5263b, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:15:17,861 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:17,867 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b. 2024-11-07T17:15:17,867 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 3c877d3c3f531453d06f6bdf82c5263b, NAME => 'hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b.', STARTKEY => '', ENDKEY => ''} 2024-11-07T17:15:17,867 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 3c877d3c3f531453d06f6bdf82c5263b 2024-11-07T17:15:17,868 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:15:17,868 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 3c877d3c3f531453d06f6bdf82c5263b 2024-11-07T17:15:17,868 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 3c877d3c3f531453d06f6bdf82c5263b 2024-11-07T17:15:17,870 INFO [StoreOpener-3c877d3c3f531453d06f6bdf82c5263b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3c877d3c3f531453d06f6bdf82c5263b 2024-11-07T17:15:17,872 INFO [StoreOpener-3c877d3c3f531453d06f6bdf82c5263b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c877d3c3f531453d06f6bdf82c5263b columnFamilyName info 2024-11-07T17:15:17,873 DEBUG [StoreOpener-3c877d3c3f531453d06f6bdf82c5263b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:17,873 INFO [StoreOpener-3c877d3c3f531453d06f6bdf82c5263b-1 {}] regionserver.HStore(327): Store=3c877d3c3f531453d06f6bdf82c5263b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:15:17,875 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/namespace/3c877d3c3f531453d06f6bdf82c5263b 2024-11-07T17:15:17,875 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/namespace/3c877d3c3f531453d06f6bdf82c5263b 2024-11-07T17:15:17,879 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 3c877d3c3f531453d06f6bdf82c5263b 2024-11-07T17:15:17,883 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/namespace/3c877d3c3f531453d06f6bdf82c5263b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T17:15:17,884 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 3c877d3c3f531453d06f6bdf82c5263b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60502704, jitterRate=-0.09843945503234863}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T17:15:17,885 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 3c877d3c3f531453d06f6bdf82c5263b: 2024-11-07T17:15:17,887 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b., pid=6, masterSystemTime=1730999717860 2024-11-07T17:15:17,890 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b. 2024-11-07T17:15:17,890 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b. 
2024-11-07T17:15:17,891 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=3c877d3c3f531453d06f6bdf82c5263b, regionState=OPEN, openSeqNum=2, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:17,897 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-07T17:15:17,898 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 3c877d3c3f531453d06f6bdf82c5263b, server=3a0fde618c86,37403,1730999712734 in 188 msec 2024-11-07T17:15:17,901 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-07T17:15:17,901 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=3c877d3c3f531453d06f6bdf82c5263b, ASSIGN in 349 msec 2024-11-07T17:15:17,903 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T17:15:17,903 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999717903"}]},"ts":"1730999717903"} 2024-11-07T17:15:17,905 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-07T17:15:17,909 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T17:15:17,912 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2450 sec 2024-11-07T17:15:17,973 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-07T17:15:17,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-07T17:15:17,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:15:17,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:15:18,003 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-07T17:15:18,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-07T17:15:18,022 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 22 msec 2024-11-07T17:15:18,027 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-07T17:15:18,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-07T17:15:18,041 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 13 msec 2024-11-07T17:15:18,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-07T17:15:18,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-07T17:15:18,054 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.244sec 2024-11-07T17:15:18,056 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T17:15:18,057 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T17:15:18,058 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T17:15:18,059 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-07T17:15:18,059 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T17:15:18,060 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3a0fde618c86,35383,1730999712016-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T17:15:18,060 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3a0fde618c86,35383,1730999712016-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T17:15:18,067 DEBUG [master/3a0fde618c86:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-07T17:15:18,067 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T17:15:18,068 INFO [master/3a0fde618c86:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3a0fde618c86,35383,1730999712016-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T17:15:18,091 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e83c466 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39dee83f 2024-11-07T17:15:18,091 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-07T17:15:18,097 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67b8b597, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:18,102 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-07T17:15:18,102 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-07T17:15:18,111 DEBUG [hconnection-0x4c09ef46-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:18,119 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51424, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:18,128 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=3a0fde618c86,35383,1730999712016 2024-11-07T17:15:18,143 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=202, ProcessCount=11, AvailableMemoryMB=3832 2024-11-07T17:15:18,178 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T17:15:18,181 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59992, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T17:15:18,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-07T17:15:18,194 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T17:15:18,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-07T17:15:18,202 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T17:15:18,203 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:18,203 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-07T17:15:18,204 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T17:15:18,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-07T17:15:18,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741837_1013 (size=960) 2024-11-07T17:15:18,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-07T17:15:18,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-07T17:15:18,619 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17 2024-11-07T17:15:18,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741838_1014 (size=53) 2024-11-07T17:15:18,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-07T17:15:19,030 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:15:19,030 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 852ea2728c497a9e191625c6cb13c906, disabling compactions & flushes 2024-11-07T17:15:19,030 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:19,030 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:19,030 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. after waiting 0 ms 2024-11-07T17:15:19,030 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:19,030 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:19,030 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:19,033 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T17:15:19,033 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1730999719033"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730999719033"}]},"ts":"1730999719033"} 2024-11-07T17:15:19,037 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-07T17:15:19,038 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T17:15:19,038 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999719038"}]},"ts":"1730999719038"} 2024-11-07T17:15:19,041 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-07T17:15:19,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=852ea2728c497a9e191625c6cb13c906, ASSIGN}] 2024-11-07T17:15:19,047 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=852ea2728c497a9e191625c6cb13c906, ASSIGN 2024-11-07T17:15:19,049 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=852ea2728c497a9e191625c6cb13c906, ASSIGN; state=OFFLINE, location=3a0fde618c86,37403,1730999712734; forceNewPlan=false, retain=false 2024-11-07T17:15:19,199 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=852ea2728c497a9e191625c6cb13c906, regionState=OPENING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:19,203 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:15:19,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-07T17:15:19,357 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:19,364 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:19,365 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} 2024-11-07T17:15:19,365 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:19,366 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:15:19,366 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:19,366 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:19,368 INFO [StoreOpener-852ea2728c497a9e191625c6cb13c906-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:19,371 INFO [StoreOpener-852ea2728c497a9e191625c6cb13c906-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:15:19,372 INFO [StoreOpener-852ea2728c497a9e191625c6cb13c906-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 852ea2728c497a9e191625c6cb13c906 columnFamilyName A 2024-11-07T17:15:19,372 DEBUG [StoreOpener-852ea2728c497a9e191625c6cb13c906-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:19,373 INFO [StoreOpener-852ea2728c497a9e191625c6cb13c906-1 {}] regionserver.HStore(327): Store=852ea2728c497a9e191625c6cb13c906/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:15:19,373 INFO [StoreOpener-852ea2728c497a9e191625c6cb13c906-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:19,375 INFO [StoreOpener-852ea2728c497a9e191625c6cb13c906-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:15:19,376 INFO [StoreOpener-852ea2728c497a9e191625c6cb13c906-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 852ea2728c497a9e191625c6cb13c906 columnFamilyName B 2024-11-07T17:15:19,376 DEBUG [StoreOpener-852ea2728c497a9e191625c6cb13c906-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:19,377 INFO [StoreOpener-852ea2728c497a9e191625c6cb13c906-1 {}] regionserver.HStore(327): Store=852ea2728c497a9e191625c6cb13c906/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:15:19,377 INFO [StoreOpener-852ea2728c497a9e191625c6cb13c906-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:19,379 INFO [StoreOpener-852ea2728c497a9e191625c6cb13c906-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:15:19,379 INFO [StoreOpener-852ea2728c497a9e191625c6cb13c906-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 852ea2728c497a9e191625c6cb13c906 columnFamilyName C 2024-11-07T17:15:19,379 DEBUG [StoreOpener-852ea2728c497a9e191625c6cb13c906-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:19,380 INFO [StoreOpener-852ea2728c497a9e191625c6cb13c906-1 {}] regionserver.HStore(327): Store=852ea2728c497a9e191625c6cb13c906/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:15:19,381 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:19,383 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:19,384 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:19,387 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T17:15:19,389 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:19,392 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T17:15:19,393 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 852ea2728c497a9e191625c6cb13c906; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63646276, jitterRate=-0.05159658193588257}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T17:15:19,394 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:19,396 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., pid=11, masterSystemTime=1730999719357 2024-11-07T17:15:19,399 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:19,399 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:19,400 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=852ea2728c497a9e191625c6cb13c906, regionState=OPEN, openSeqNum=2, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:19,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-07T17:15:19,407 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 in 200 msec 2024-11-07T17:15:19,410 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-07T17:15:19,410 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=852ea2728c497a9e191625c6cb13c906, ASSIGN in 361 msec 2024-11-07T17:15:19,411 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T17:15:19,412 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999719411"}]},"ts":"1730999719411"} 2024-11-07T17:15:19,415 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-07T17:15:19,418 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T17:15:19,421 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2230 sec 2024-11-07T17:15:20,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-07T17:15:20,327 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-07T17:15:20,332 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e98ea32 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b9fcedf 2024-11-07T17:15:20,336 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e71e468, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:20,339 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:20,341 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39792, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:20,344 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T17:15:20,348 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55796, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T17:15:20,355 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12885408 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9bd0964 2024-11-07T17:15:20,360 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c63ae4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:20,361 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72b32f98 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1324ee83 2024-11-07T17:15:20,365 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@736f1673, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:20,367 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04977266 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45b55c24 2024-11-07T17:15:20,370 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ee2166f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:20,371 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bbb5d8a to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@48068a5 2024-11-07T17:15:20,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f34ff67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:20,375 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18603bb9 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3883f7b 2024-11-07T17:15:20,379 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b5f27aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:20,381 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72e97e4b to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12a1285d 2024-11-07T17:15:20,386 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c3b736e, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:20,387 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x490457fd to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@527c6d40 2024-11-07T17:15:20,392 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@353bc462, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:20,393 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c8de680 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47fe2fa7 2024-11-07T17:15:20,396 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6502d571, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:20,397 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f6b07e3 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@595e9ebe 2024-11-07T17:15:20,400 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a0471b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:20,407 DEBUG [hconnection-0xaa7b5a2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:20,407 DEBUG [hconnection-0x57f69c46-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:20,407 DEBUG [hconnection-0x8fbcdfd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:20,408 DEBUG [hconnection-0x6953ce0b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:20,410 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:20,417 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39824, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:20,417 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39826, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:20,418 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39838, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:20,421 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 
{}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:20,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-07T17:15:20,431 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:20,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-07T17:15:20,433 DEBUG [hconnection-0x28a159a5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:20,433 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:20,434 DEBUG [hconnection-0x74c7c505-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:20,435 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:20,435 DEBUG [hconnection-0x71942b95-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:20,438 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39850, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:20,439 DEBUG [hconnection-0x609528f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:20,442 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39854, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:20,454 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39870, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:20,458 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39872, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:20,465 DEBUG [hconnection-0x249e6b7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:20,476 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39888, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:20,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:20,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T17:15:20,529 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:20,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:20,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:20,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:20,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:20,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:20,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-07T17:15:20,598 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T17:15:20,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:20,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:20,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:20,615 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:20,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:20,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:20,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/8b77b00435a548979b1f95b2525563b8 is 50, key is test_row_0/A:col10/1730999720510/Put/seqid=0 2024-11-07T17:15:20,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:20,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999780635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:20,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:20,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999780653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999780646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:20,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999780657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:20,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999780659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741839_1015 (size=12001) 2024-11-07T17:15:20,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-07T17:15:20,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:20,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:20,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999780771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999780772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:20,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:20,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999780772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999780773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:20,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999780775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,791 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T17:15:20,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:20,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:20,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:20,803 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:20,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:20,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:20,958 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T17:15:20,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:20,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:20,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:20,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:20,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:20,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:20,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999780979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:20,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:20,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999780979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:20,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999780981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:20,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999780982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:20,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:20,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999780983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-07T17:15:21,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/8b77b00435a548979b1f95b2525563b8 2024-11-07T17:15:21,132 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,133 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T17:15:21,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:21,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:21,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:21,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:21,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:21,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:21,231 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/60a90898901c43fe9d4fcac77fa94ddf is 50, key is test_row_0/B:col10/1730999720510/Put/seqid=0 2024-11-07T17:15:21,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741840_1016 (size=12001) 2024-11-07T17:15:21,245 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/60a90898901c43fe9d4fcac77fa94ddf 2024-11-07T17:15:21,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:21,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999781285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,287 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,288 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T17:15:21,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:21,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:21,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:21,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:21,289 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:21,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999781286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:21,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:21,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999781288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:21,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:21,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999781290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:21,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b0a2197ec0dc4fb5b49b565a1c806c00 is 50, key is test_row_0/C:col10/1730999720510/Put/seqid=0 2024-11-07T17:15:21,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999781291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,332 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-07T17:15:21,334 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-07T17:15:21,335 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-07T17:15:21,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741841_1017 (size=12001) 2024-11-07T17:15:21,337 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b0a2197ec0dc4fb5b49b565a1c806c00 2024-11-07T17:15:21,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/8b77b00435a548979b1f95b2525563b8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8b77b00435a548979b1f95b2525563b8 2024-11-07T17:15:21,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8b77b00435a548979b1f95b2525563b8, entries=150, sequenceid=17, filesize=11.7 K 2024-11-07T17:15:21,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/60a90898901c43fe9d4fcac77fa94ddf as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/60a90898901c43fe9d4fcac77fa94ddf 2024-11-07T17:15:21,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/60a90898901c43fe9d4fcac77fa94ddf, entries=150, sequenceid=17, filesize=11.7 K 2024-11-07T17:15:21,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b0a2197ec0dc4fb5b49b565a1c806c00 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b0a2197ec0dc4fb5b49b565a1c806c00 2024-11-07T17:15:21,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b0a2197ec0dc4fb5b49b565a1c806c00, entries=150, sequenceid=17, filesize=11.7 K 2024-11-07T17:15:21,413 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 852ea2728c497a9e191625c6cb13c906 in 898ms, sequenceid=17, compaction requested=false 2024-11-07T17:15:21,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:21,443 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T17:15:21,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:21,444 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-07T17:15:21,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:21,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:21,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:21,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:21,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:21,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:21,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/386a7c14032b4fa9b2ceab6f741f418a is 50, key is test_row_0/A:col10/1730999720645/Put/seqid=0 2024-11-07T17:15:21,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741842_1018 (size=12001) 2024-11-07T17:15:21,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-07T17:15:21,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:21,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:21,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:21,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999781814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:21,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999781817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:21,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999781817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:21,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999781824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:21,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999781826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,885 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/386a7c14032b4fa9b2ceab6f741f418a 2024-11-07T17:15:21,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/fb6bfc4adbaa43daba384232877f9741 is 50, key is test_row_0/B:col10/1730999720645/Put/seqid=0 2024-11-07T17:15:21,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:21,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:21,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999781930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999781929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:21,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999781930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:21,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999781933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:21,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999781933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:21,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741843_1019 (size=12001) 2024-11-07T17:15:21,949 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/fb6bfc4adbaa43daba384232877f9741 2024-11-07T17:15:21,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/c5521444a8284f179ce6ed2219016bfd is 50, key is test_row_0/C:col10/1730999720645/Put/seqid=0 2024-11-07T17:15:22,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741844_1020 (size=12001) 2024-11-07T17:15:22,038 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-07T17:15:22,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:22,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999782137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:22,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:22,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999782137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:22,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:22,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999782139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:22,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:22,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999782140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:22,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:22,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999782140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:22,426 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/c5521444a8284f179ce6ed2219016bfd 2024-11-07T17:15:22,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/386a7c14032b4fa9b2ceab6f741f418a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/386a7c14032b4fa9b2ceab6f741f418a 2024-11-07T17:15:22,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:22,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999782444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:22,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:22,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999782445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:22,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:22,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999782446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:22,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:22,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999782447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:22,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:22,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999782447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:22,460 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/386a7c14032b4fa9b2ceab6f741f418a, entries=150, sequenceid=37, filesize=11.7 K 2024-11-07T17:15:22,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/fb6bfc4adbaa43daba384232877f9741 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/fb6bfc4adbaa43daba384232877f9741 2024-11-07T17:15:22,474 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/fb6bfc4adbaa43daba384232877f9741, entries=150, sequenceid=37, filesize=11.7 K 2024-11-07T17:15:22,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/c5521444a8284f179ce6ed2219016bfd as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/c5521444a8284f179ce6ed2219016bfd 2024-11-07T17:15:22,493 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/c5521444a8284f179ce6ed2219016bfd, entries=150, sequenceid=37, filesize=11.7 K 2024-11-07T17:15:22,495 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 852ea2728c497a9e191625c6cb13c906 in 1050ms, sequenceid=37, compaction requested=false 2024-11-07T17:15:22,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:22,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:22,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-07T17:15:22,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-07T17:15:22,501 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-07T17:15:22,502 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-07T17:15:22,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-07T17:15:22,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0620 sec 2024-11-07T17:15:22,504 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-07T17:15:22,504 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-07T17:15:22,506 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T17:15:22,506 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-07T17:15:22,506 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, 
table=TestAcidGuarantees in 2.0800 sec 2024-11-07T17:15:22,506 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-07T17:15:22,506 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-07T17:15:22,507 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-07T17:15:22,508 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-07T17:15:22,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-07T17:15:22,544 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-07T17:15:22,547 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:22,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-07T17:15:22,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-07T17:15:22,550 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:22,552 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:22,552 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:22,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-07T17:15:22,706 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:22,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-07T17:15:22,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:22,708 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-07T17:15:22,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:22,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:22,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:22,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:22,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:22,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:22,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/e195725e858a435baab24376d4267640 is 50, key is test_row_0/A:col10/1730999721820/Put/seqid=0 2024-11-07T17:15:22,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741845_1021 (size=12001) 2024-11-07T17:15:22,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-07T17:15:22,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:22,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:22,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:22,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:22,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999782990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:22,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999782989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:22,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:22,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999782991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999782997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999782998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999783101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999783101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999783101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999783102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999783105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,143 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/e195725e858a435baab24376d4267640 2024-11-07T17:15:23,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-07T17:15:23,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/2aecf4c241eb49389b6b7fc7313ce7ed is 50, key is test_row_0/B:col10/1730999721820/Put/seqid=0 2024-11-07T17:15:23,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741846_1022 (size=12001) 2024-11-07T17:15:23,195 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/2aecf4c241eb49389b6b7fc7313ce7ed 2024-11-07T17:15:23,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/097857a35e0a4774acaa2723f8df72ba is 50, key is test_row_0/C:col10/1730999721820/Put/seqid=0 2024-11-07T17:15:23,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741847_1023 (size=12001) 2024-11-07T17:15:23,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999783309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999783310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999783310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999783310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999783311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999783615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999783617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999783619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999783620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:23,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999783622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:23,652 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/097857a35e0a4774acaa2723f8df72ba 2024-11-07T17:15:23,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-07T17:15:23,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/e195725e858a435baab24376d4267640 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/e195725e858a435baab24376d4267640 2024-11-07T17:15:23,687 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/e195725e858a435baab24376d4267640, entries=150, sequenceid=53, filesize=11.7 K 2024-11-07T17:15:23,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/2aecf4c241eb49389b6b7fc7313ce7ed as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/2aecf4c241eb49389b6b7fc7313ce7ed 2024-11-07T17:15:23,703 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/2aecf4c241eb49389b6b7fc7313ce7ed, entries=150, sequenceid=53, filesize=11.7 K 2024-11-07T17:15:23,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/097857a35e0a4774acaa2723f8df72ba as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/097857a35e0a4774acaa2723f8df72ba 2024-11-07T17:15:23,718 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/097857a35e0a4774acaa2723f8df72ba, entries=150, sequenceid=53, filesize=11.7 K 2024-11-07T17:15:23,720 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 852ea2728c497a9e191625c6cb13c906 in 1012ms, sequenceid=53, compaction requested=true 2024-11-07T17:15:23,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:23,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:23,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-07T17:15:23,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-07T17:15:23,726 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-07T17:15:23,726 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1710 sec 2024-11-07T17:15:23,729 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.1800 sec 2024-11-07T17:15:24,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-07T17:15:24,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:24,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:24,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:24,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:24,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:24,133 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:24,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:24,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/39c16f66d4d1442a85bb8c9eb897afc3 is 50, key is test_row_0/A:col10/1730999722993/Put/seqid=0 2024-11-07T17:15:24,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999784148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999784148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999784151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999784156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999784157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741848_1024 (size=12001) 2024-11-07T17:15:24,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/39c16f66d4d1442a85bb8c9eb897afc3 2024-11-07T17:15:24,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/59123d9b9e724c2497084651d280ef87 is 50, key is test_row_0/B:col10/1730999722993/Put/seqid=0 2024-11-07T17:15:24,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741849_1025 (size=12001) 2024-11-07T17:15:24,233 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/59123d9b9e724c2497084651d280ef87 2024-11-07T17:15:24,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/fcae88eca3314a6d8cbfbbbccffc4618 is 50, key is test_row_0/C:col10/1730999722993/Put/seqid=0 2024-11-07T17:15:24,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999784259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999784260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999784262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999784260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999784264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741850_1026 (size=12001) 2024-11-07T17:15:24,289 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/fcae88eca3314a6d8cbfbbbccffc4618 2024-11-07T17:15:24,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/39c16f66d4d1442a85bb8c9eb897afc3 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/39c16f66d4d1442a85bb8c9eb897afc3 2024-11-07T17:15:24,311 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/39c16f66d4d1442a85bb8c9eb897afc3, entries=150, sequenceid=76, filesize=11.7 K 2024-11-07T17:15:24,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/59123d9b9e724c2497084651d280ef87 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/59123d9b9e724c2497084651d280ef87 2024-11-07T17:15:24,330 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/59123d9b9e724c2497084651d280ef87, entries=150, sequenceid=76, filesize=11.7 K 2024-11-07T17:15:24,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/fcae88eca3314a6d8cbfbbbccffc4618 as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/fcae88eca3314a6d8cbfbbbccffc4618 2024-11-07T17:15:24,345 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/fcae88eca3314a6d8cbfbbbccffc4618, entries=150, sequenceid=76, filesize=11.7 K 2024-11-07T17:15:24,347 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 852ea2728c497a9e191625c6cb13c906 in 218ms, sequenceid=76, compaction requested=true 2024-11-07T17:15:24,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:24,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:15:24,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:24,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:24,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:24,350 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:24,350 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:24,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:24,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:24,355 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:24,357 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/B is initiating minor compaction (all files) 2024-11-07T17:15:24,357 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/B in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:24,358 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/60a90898901c43fe9d4fcac77fa94ddf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/fb6bfc4adbaa43daba384232877f9741, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/2aecf4c241eb49389b6b7fc7313ce7ed, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/59123d9b9e724c2497084651d280ef87] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=46.9 K 2024-11-07T17:15:24,359 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 60a90898901c43fe9d4fcac77fa94ddf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1730999720480 2024-11-07T17:15:24,359 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:24,360 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/A is initiating minor compaction (all files) 2024-11-07T17:15:24,360 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/A in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:24,360 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting fb6bfc4adbaa43daba384232877f9741, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1730999720630 2024-11-07T17:15:24,361 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8b77b00435a548979b1f95b2525563b8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/386a7c14032b4fa9b2ceab6f741f418a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/e195725e858a435baab24376d4267640, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/39c16f66d4d1442a85bb8c9eb897afc3] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=46.9 K 2024-11-07T17:15:24,361 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 2aecf4c241eb49389b6b7fc7313ce7ed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1730999721808 2024-11-07T17:15:24,363 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 59123d9b9e724c2497084651d280ef87, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1730999722993 2024-11-07T17:15:24,365 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b77b00435a548979b1f95b2525563b8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1730999720480 2024-11-07T17:15:24,368 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 386a7c14032b4fa9b2ceab6f741f418a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1730999720630 2024-11-07T17:15:24,371 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting e195725e858a435baab24376d4267640, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1730999721808 2024-11-07T17:15:24,372 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39c16f66d4d1442a85bb8c9eb897afc3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1730999722993 2024-11-07T17:15:24,431 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#B#compaction#13 average throughput is 0.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:24,432 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/84b7f367ba8b45868c4e0959842475c6 is 50, key is test_row_0/B:col10/1730999722993/Put/seqid=0 2024-11-07T17:15:24,434 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#A#compaction#12 average throughput is 0.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:24,456 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/451c95139e564769aff35e505af0d30a is 50, key is test_row_0/A:col10/1730999722993/Put/seqid=0 2024-11-07T17:15:24,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741851_1027 (size=12139) 2024-11-07T17:15:24,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:24,475 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-07T17:15:24,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:24,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:24,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:24,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:24,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:24,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:24,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741852_1028 (size=12139) 2024-11-07T17:15:24,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/cfb2d5834c8748549be25c5a3e340039 is 50, key is test_row_0/A:col10/1730999724153/Put/seqid=0 2024-11-07T17:15:24,512 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/451c95139e564769aff35e505af0d30a as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/451c95139e564769aff35e505af0d30a 2024-11-07T17:15:24,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741853_1029 (size=16681) 2024-11-07T17:15:24,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/cfb2d5834c8748549be25c5a3e340039 2024-11-07T17:15:24,552 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/A of 852ea2728c497a9e191625c6cb13c906 into 451c95139e564769aff35e505af0d30a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:24,552 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:24,552 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/A, priority=12, startTime=1730999724348; duration=0sec 2024-11-07T17:15:24,553 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:24,553 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:A 2024-11-07T17:15:24,553 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:24,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999784547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/a34d49e3e34f4471a784edeb0dbdd8e5 is 50, key is test_row_0/B:col10/1730999724153/Put/seqid=0 2024-11-07T17:15:24,569 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:24,569 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/C is initiating minor compaction (all files) 2024-11-07T17:15:24,570 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/C in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:24,570 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b0a2197ec0dc4fb5b49b565a1c806c00, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/c5521444a8284f179ce6ed2219016bfd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/097857a35e0a4774acaa2723f8df72ba, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/fcae88eca3314a6d8cbfbbbccffc4618] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=46.9 K 2024-11-07T17:15:24,571 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0a2197ec0dc4fb5b49b565a1c806c00, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1730999720480 2024-11-07T17:15:24,572 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5521444a8284f179ce6ed2219016bfd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1730999720630 2024-11-07T17:15:24,573 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 097857a35e0a4774acaa2723f8df72ba, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1730999721808 2024-11-07T17:15:24,574 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting fcae88eca3314a6d8cbfbbbccffc4618, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1730999722993 2024-11-07T17:15:24,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999784556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999784560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,591 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999784561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999784567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,612 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#C#compaction#16 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:24,613 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/36a18fb00539472db643838eb06b6eee is 50, key is test_row_0/C:col10/1730999722993/Put/seqid=0 2024-11-07T17:15:24,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741854_1030 (size=12001) 2024-11-07T17:15:24,627 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/a34d49e3e34f4471a784edeb0dbdd8e5 2024-11-07T17:15:24,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741855_1031 (size=12139) 2024-11-07T17:15:24,644 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/36a18fb00539472db643838eb06b6eee as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/36a18fb00539472db643838eb06b6eee 2024-11-07T17:15:24,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/cbe51743b02c42338b5cbc412e417ad6 is 50, key is test_row_0/C:col10/1730999724153/Put/seqid=0 2024-11-07T17:15:24,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-07T17:15:24,659 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-07T17:15:24,659 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/C of 852ea2728c497a9e191625c6cb13c906 into 36a18fb00539472db643838eb06b6eee(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:24,659 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:24,659 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/C, priority=12, startTime=1730999724350; duration=0sec 2024-11-07T17:15:24,659 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:24,659 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:C 2024-11-07T17:15:24,662 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:24,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-07T17:15:24,665 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:24,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-07T17:15:24,666 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:24,666 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:24,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741856_1032 (size=12001) 2024-11-07T17:15:24,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999784666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/cbe51743b02c42338b5cbc412e417ad6 2024-11-07T17:15:24,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/cfb2d5834c8748549be25c5a3e340039 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/cfb2d5834c8748549be25c5a3e340039 2024-11-07T17:15:24,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999784694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999784695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999784695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,705 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/cfb2d5834c8748549be25c5a3e340039, entries=250, sequenceid=92, filesize=16.3 K 2024-11-07T17:15:24,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/a34d49e3e34f4471a784edeb0dbdd8e5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/a34d49e3e34f4471a784edeb0dbdd8e5 2024-11-07T17:15:24,718 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/a34d49e3e34f4471a784edeb0dbdd8e5, entries=150, sequenceid=92, filesize=11.7 K 2024-11-07T17:15:24,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/cbe51743b02c42338b5cbc412e417ad6 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/cbe51743b02c42338b5cbc412e417ad6 2024-11-07T17:15:24,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/cbe51743b02c42338b5cbc412e417ad6, entries=150, sequenceid=92, filesize=11.7 K 2024-11-07T17:15:24,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999784717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,732 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 852ea2728c497a9e191625c6cb13c906 in 257ms, sequenceid=92, compaction requested=false 2024-11-07T17:15:24,733 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:24,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-07T17:15:24,821 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,822 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-07T17:15:24,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:24,822 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-07T17:15:24,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:24,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:24,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:24,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:24,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:24,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:24,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/5d33365a07eb41eaa9c5c2d381bc66bf is 50, key is test_row_0/A:col10/1730999724558/Put/seqid=0 2024-11-07T17:15:24,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741857_1033 (size=12001) 2024-11-07T17:15:24,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:24,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:24,888 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/84b7f367ba8b45868c4e0959842475c6 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/84b7f367ba8b45868c4e0959842475c6 2024-11-07T17:15:24,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-07T17:15:24,972 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/B of 852ea2728c497a9e191625c6cb13c906 into 84b7f367ba8b45868c4e0959842475c6(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:24,972 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:24,972 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/B, priority=12, startTime=1730999724350; duration=0sec 2024-11-07T17:15:24,972 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:24,972 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:B 2024-11-07T17:15:24,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999784984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999784988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:24,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:24,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999784990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999784993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999784994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999785096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999785096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999785097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999785104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999785105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-07T17:15:25,270 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/5d33365a07eb41eaa9c5c2d381bc66bf 2024-11-07T17:15:25,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/34a69534615048cd81635b2408003a31 is 50, key is test_row_0/B:col10/1730999724558/Put/seqid=0 2024-11-07T17:15:25,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999785303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999785304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999785305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999785310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999785312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741858_1034 (size=12001) 2024-11-07T17:15:25,332 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/34a69534615048cd81635b2408003a31 2024-11-07T17:15:25,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/cf433b1ea72b410590a7d6458b041976 is 50, key is test_row_0/C:col10/1730999724558/Put/seqid=0 2024-11-07T17:15:25,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741859_1035 (size=12001) 2024-11-07T17:15:25,396 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/cf433b1ea72b410590a7d6458b041976 2024-11-07T17:15:25,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/5d33365a07eb41eaa9c5c2d381bc66bf as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/5d33365a07eb41eaa9c5c2d381bc66bf 2024-11-07T17:15:25,421 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/5d33365a07eb41eaa9c5c2d381bc66bf, entries=150, sequenceid=114, filesize=11.7 K 2024-11-07T17:15:25,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/34a69534615048cd81635b2408003a31 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/34a69534615048cd81635b2408003a31 2024-11-07T17:15:25,435 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/34a69534615048cd81635b2408003a31, entries=150, sequenceid=114, filesize=11.7 K 2024-11-07T17:15:25,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/cf433b1ea72b410590a7d6458b041976 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/cf433b1ea72b410590a7d6458b041976 2024-11-07T17:15:25,452 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/cf433b1ea72b410590a7d6458b041976, entries=150, sequenceid=114, filesize=11.7 K 2024-11-07T17:15:25,454 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 852ea2728c497a9e191625c6cb13c906 in 632ms, sequenceid=114, compaction requested=true 2024-11-07T17:15:25,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:25,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:25,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-07T17:15:25,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-07T17:15:25,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-07T17:15:25,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 790 msec 2024-11-07T17:15:25,464 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 800 msec 2024-11-07T17:15:25,616 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-07T17:15:25,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:25,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:25,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:25,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:25,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:25,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:25,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:25,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/475e4da76a394b258c228c9bae8cfbbd is 50, key is test_row_0/A:col10/1730999724988/Put/seqid=0 2024-11-07T17:15:25,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999785638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999785638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999785645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999785647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999785648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741860_1036 (size=16881) 2024-11-07T17:15:25,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/475e4da76a394b258c228c9bae8cfbbd 2024-11-07T17:15:25,686 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/74700daf86154f7c955c2d13ee44a690 is 50, key is test_row_0/B:col10/1730999724988/Put/seqid=0 2024-11-07T17:15:25,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741861_1037 (size=12151) 2024-11-07T17:15:25,704 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/74700daf86154f7c955c2d13ee44a690 2024-11-07T17:15:25,720 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/bef7c98a8216415fb5b94bf0718ead21 is 50, key is test_row_0/C:col10/1730999724988/Put/seqid=0 2024-11-07T17:15:25,747 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741862_1038 (size=12151) 2024-11-07T17:15:25,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/bef7c98a8216415fb5b94bf0718ead21 2024-11-07T17:15:25,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999785750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999785752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999785753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999785753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999785753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/475e4da76a394b258c228c9bae8cfbbd as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/475e4da76a394b258c228c9bae8cfbbd 2024-11-07T17:15:25,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/475e4da76a394b258c228c9bae8cfbbd, entries=250, sequenceid=134, filesize=16.5 K 2024-11-07T17:15:25,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-07T17:15:25,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/74700daf86154f7c955c2d13ee44a690 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/74700daf86154f7c955c2d13ee44a690 2024-11-07T17:15:25,774 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-07T17:15:25,777 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:25,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-07T17:15:25,780 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:25,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-07T17:15:25,781 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:25,781 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:25,809 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/74700daf86154f7c955c2d13ee44a690, entries=150, sequenceid=134, filesize=11.9 K 2024-11-07T17:15:25,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/bef7c98a8216415fb5b94bf0718ead21 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/bef7c98a8216415fb5b94bf0718ead21 2024-11-07T17:15:25,823 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/bef7c98a8216415fb5b94bf0718ead21, entries=150, sequenceid=134, filesize=11.9 K 2024-11-07T17:15:25,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 852ea2728c497a9e191625c6cb13c906 in 210ms, sequenceid=134, compaction requested=true 2024-11-07T17:15:25,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:25,825 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:25,828 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57702 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:25,828 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/A is initiating minor compaction (all files) 2024-11-07T17:15:25,828 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/A in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:25,828 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/451c95139e564769aff35e505af0d30a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/cfb2d5834c8748549be25c5a3e340039, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/5d33365a07eb41eaa9c5c2d381bc66bf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/475e4da76a394b258c228c9bae8cfbbd] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=56.3 K 2024-11-07T17:15:25,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:15:25,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:25,829 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:25,829 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 451c95139e564769aff35e505af0d30a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1730999722993 2024-11-07T17:15:25,830 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting cfb2d5834c8748549be25c5a3e340039, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1730999724147 2024-11-07T17:15:25,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:25,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:25,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:25,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:25,830 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d33365a07eb41eaa9c5c2d381bc66bf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1730999724551 2024-11-07T17:15:25,831 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 475e4da76a394b258c228c9bae8cfbbd, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, 
seqNum=134, earliestPutTs=1730999724984 2024-11-07T17:15:25,831 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48292 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:25,832 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/B is initiating minor compaction (all files) 2024-11-07T17:15:25,832 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/B in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:25,832 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/84b7f367ba8b45868c4e0959842475c6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/a34d49e3e34f4471a784edeb0dbdd8e5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/34a69534615048cd81635b2408003a31, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/74700daf86154f7c955c2d13ee44a690] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=47.2 K 2024-11-07T17:15:25,833 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 84b7f367ba8b45868c4e0959842475c6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1730999722993 2024-11-07T17:15:25,833 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting a34d49e3e34f4471a784edeb0dbdd8e5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1730999724153 2024-11-07T17:15:25,834 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 34a69534615048cd81635b2408003a31, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1730999724551 2024-11-07T17:15:25,835 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 74700daf86154f7c955c2d13ee44a690, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1730999724988 2024-11-07T17:15:25,856 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#A#compaction#24 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:25,857 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/3800882c54ef462c8420596ab9fa3eac is 50, key is test_row_0/A:col10/1730999724988/Put/seqid=0 2024-11-07T17:15:25,875 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#B#compaction#25 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:25,875 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/7d95bd4bb482470585dcf7043b17147c is 50, key is test_row_0/B:col10/1730999724988/Put/seqid=0 2024-11-07T17:15:25,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-07T17:15:25,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741863_1039 (size=12425) 2024-11-07T17:15:25,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741864_1040 (size=12425) 2024-11-07T17:15:25,922 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/7d95bd4bb482470585dcf7043b17147c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/7d95bd4bb482470585dcf7043b17147c 2024-11-07T17:15:25,934 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-07T17:15:25,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:25,937 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-07T17:15:25,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:25,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:25,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:25,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:25,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:25,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:25,940 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/B of 852ea2728c497a9e191625c6cb13c906 into 7d95bd4bb482470585dcf7043b17147c(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:25,940 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:25,941 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/B, priority=12, startTime=1730999725829; duration=0sec 2024-11-07T17:15:25,940 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/3800882c54ef462c8420596ab9fa3eac as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/3800882c54ef462c8420596ab9fa3eac 2024-11-07T17:15:25,943 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:25,943 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:B 2024-11-07T17:15:25,943 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:25,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/22d4b6e1206d4ea5b69ebfa2d78a8b04 is 50, key is test_row_0/A:col10/1730999725643/Put/seqid=0 2024-11-07T17:15:25,952 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/A of 852ea2728c497a9e191625c6cb13c906 into 3800882c54ef462c8420596ab9fa3eac(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:25,953 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:25,953 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/A, priority=12, startTime=1730999725825; duration=0sec 2024-11-07T17:15:25,953 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:25,953 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:A 2024-11-07T17:15:25,955 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48292 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:25,955 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/C is initiating minor compaction (all files) 2024-11-07T17:15:25,955 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/C in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:25,955 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/36a18fb00539472db643838eb06b6eee, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/cbe51743b02c42338b5cbc412e417ad6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/cf433b1ea72b410590a7d6458b041976, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/bef7c98a8216415fb5b94bf0718ead21] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=47.2 K 2024-11-07T17:15:25,956 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 36a18fb00539472db643838eb06b6eee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1730999722993 2024-11-07T17:15:25,958 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting cbe51743b02c42338b5cbc412e417ad6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1730999724153 2024-11-07T17:15:25,959 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting cf433b1ea72b410590a7d6458b041976, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1730999724551 2024-11-07T17:15:25,960 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 
bef7c98a8216415fb5b94bf0718ead21, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1730999724988 2024-11-07T17:15:25,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741865_1041 (size=12151) 2024-11-07T17:15:25,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:25,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:25,966 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/22d4b6e1206d4ea5b69ebfa2d78a8b04 2024-11-07T17:15:25,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/1b5506c4e5004ceea1f412d69302f0dd is 50, key is test_row_0/B:col10/1730999725643/Put/seqid=0 2024-11-07T17:15:25,989 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#C#compaction#28 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:25,990 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/bb4b96ba9b524815abe9a2323cd31e35 is 50, key is test_row_0/C:col10/1730999724988/Put/seqid=0 2024-11-07T17:15:25,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999785989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999785988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999785989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999785993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:25,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:25,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999785993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741866_1042 (size=12151) 2024-11-07T17:15:26,016 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/1b5506c4e5004ceea1f412d69302f0dd 2024-11-07T17:15:26,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b99afde49bd34d1aae0a91cf669a513c is 50, key is test_row_0/C:col10/1730999725643/Put/seqid=0 2024-11-07T17:15:26,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741867_1043 (size=12425) 2024-11-07T17:15:26,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741868_1044 (size=12151) 2024-11-07T17:15:26,072 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b99afde49bd34d1aae0a91cf669a513c 2024-11-07T17:15:26,083 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-07T17:15:26,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/22d4b6e1206d4ea5b69ebfa2d78a8b04 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/22d4b6e1206d4ea5b69ebfa2d78a8b04 2024-11-07T17:15:26,094 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/22d4b6e1206d4ea5b69ebfa2d78a8b04, entries=150, sequenceid=152, filesize=11.9 K 2024-11-07T17:15:26,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999786095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/1b5506c4e5004ceea1f412d69302f0dd as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/1b5506c4e5004ceea1f412d69302f0dd 2024-11-07T17:15:26,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999786096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999786096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999786100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999786100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,118 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/1b5506c4e5004ceea1f412d69302f0dd, entries=150, sequenceid=152, filesize=11.9 K 2024-11-07T17:15:26,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b99afde49bd34d1aae0a91cf669a513c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b99afde49bd34d1aae0a91cf669a513c 2024-11-07T17:15:26,128 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b99afde49bd34d1aae0a91cf669a513c, entries=150, sequenceid=152, filesize=11.9 K 2024-11-07T17:15:26,130 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 852ea2728c497a9e191625c6cb13c906 in 193ms, sequenceid=152, compaction requested=false 2024-11-07T17:15:26,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:26,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:26,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-07T17:15:26,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-07T17:15:26,136 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-07T17:15:26,136 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 352 msec 2024-11-07T17:15:26,140 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 360 msec 2024-11-07T17:15:26,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:26,304 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-07T17:15:26,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:26,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:26,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:26,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:26,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:26,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:26,315 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/c6a6ce4ea9404736ab586cdb5224694f is 50, key is test_row_0/A:col10/1730999725987/Put/seqid=0 2024-11-07T17:15:26,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741869_1045 (size=14541) 2024-11-07T17:15:26,325 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/c6a6ce4ea9404736ab586cdb5224694f 2024-11-07T17:15:26,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999786330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999786332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999786333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999786333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999786337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/f1695239624c4fd48b3c0212eeabefd1 is 50, key is test_row_0/B:col10/1730999725987/Put/seqid=0 2024-11-07T17:15:26,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741870_1046 (size=12151) 2024-11-07T17:15:26,378 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/f1695239624c4fd48b3c0212eeabefd1 2024-11-07T17:15:26,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-07T17:15:26,386 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-07T17:15:26,390 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:26,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-07T17:15:26,393 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:26,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-07T17:15:26,394 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:26,394 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-07T17:15:26,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/7bc714f8851d40c58a7cc782cd47316a is 50, key is test_row_0/C:col10/1730999725987/Put/seqid=0 2024-11-07T17:15:26,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741871_1047 (size=12151) 2024-11-07T17:15:26,418 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/7bc714f8851d40c58a7cc782cd47316a 2024-11-07T17:15:26,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/c6a6ce4ea9404736ab586cdb5224694f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c6a6ce4ea9404736ab586cdb5224694f 2024-11-07T17:15:26,437 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c6a6ce4ea9404736ab586cdb5224694f, entries=200, sequenceid=174, filesize=14.2 K 2024-11-07T17:15:26,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/f1695239624c4fd48b3c0212eeabefd1 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/f1695239624c4fd48b3c0212eeabefd1 2024-11-07T17:15:26,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999786440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999786441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999786440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999786441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999786444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/f1695239624c4fd48b3c0212eeabefd1, entries=150, sequenceid=174, filesize=11.9 K 2024-11-07T17:15:26,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/7bc714f8851d40c58a7cc782cd47316a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7bc714f8851d40c58a7cc782cd47316a 2024-11-07T17:15:26,456 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/bb4b96ba9b524815abe9a2323cd31e35 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/bb4b96ba9b524815abe9a2323cd31e35 2024-11-07T17:15:26,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7bc714f8851d40c58a7cc782cd47316a, entries=150, sequenceid=174, filesize=11.9 K 2024-11-07T17:15:26,470 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/C of 
852ea2728c497a9e191625c6cb13c906 into bb4b96ba9b524815abe9a2323cd31e35(size=12.1 K), total size for store is 35.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:26,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 852ea2728c497a9e191625c6cb13c906 in 165ms, sequenceid=174, compaction requested=true 2024-11-07T17:15:26,470 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:26,470 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/C, priority=12, startTime=1730999725830; duration=0sec 2024-11-07T17:15:26,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:26,470 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:26,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:A, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:26,470 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:C 2024-11-07T17:15:26,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:26,470 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:26,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:26,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:26,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:26,471 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:26,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:26,473 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39117 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:26,474 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): 
Exploring compaction algorithm has selected 3 files of size 36727 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:26,474 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/A is initiating minor compaction (all files) 2024-11-07T17:15:26,474 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/B is initiating minor compaction (all files) 2024-11-07T17:15:26,474 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/A in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:26,474 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/B in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:26,474 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/3800882c54ef462c8420596ab9fa3eac, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/22d4b6e1206d4ea5b69ebfa2d78a8b04, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c6a6ce4ea9404736ab586cdb5224694f] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=38.2 K 2024-11-07T17:15:26,474 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/7d95bd4bb482470585dcf7043b17147c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/1b5506c4e5004ceea1f412d69302f0dd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/f1695239624c4fd48b3c0212eeabefd1] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=35.9 K 2024-11-07T17:15:26,475 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 3800882c54ef462c8420596ab9fa3eac, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1730999724988 2024-11-07T17:15:26,475 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d95bd4bb482470585dcf7043b17147c, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1730999724988 2024-11-07T17:15:26,476 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 22d4b6e1206d4ea5b69ebfa2d78a8b04, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1730999725643 2024-11-07T17:15:26,476 DEBUG 
[RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b5506c4e5004ceea1f412d69302f0dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1730999725643 2024-11-07T17:15:26,476 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1695239624c4fd48b3c0212eeabefd1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1730999725985 2024-11-07T17:15:26,476 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting c6a6ce4ea9404736ab586cdb5224694f, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1730999725985 2024-11-07T17:15:26,494 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#B#compaction#33 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:26,495 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/998509a81085482cba5bcb611d836c0a is 50, key is test_row_0/B:col10/1730999725987/Put/seqid=0 2024-11-07T17:15:26,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-07T17:15:26,512 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#A#compaction#34 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:26,513 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/8a4bde71b0b34b59ad4e8bcd9521dcfd is 50, key is test_row_0/A:col10/1730999725987/Put/seqid=0 2024-11-07T17:15:26,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741872_1048 (size=12527) 2024-11-07T17:15:26,543 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/998509a81085482cba5bcb611d836c0a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/998509a81085482cba5bcb611d836c0a 2024-11-07T17:15:26,546 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,547 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-07T17:15:26,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:26,547 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-07T17:15:26,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:26,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:26,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:26,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:26,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:26,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:26,553 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/B of 852ea2728c497a9e191625c6cb13c906 into 998509a81085482cba5bcb611d836c0a(size=12.2 K), 
total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:26,553 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:26,553 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/B, priority=13, startTime=1730999726470; duration=0sec 2024-11-07T17:15:26,554 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:26,554 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:B 2024-11-07T17:15:26,554 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:26,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741873_1049 (size=12527) 2024-11-07T17:15:26,556 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36727 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:26,556 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/C is initiating minor compaction (all files) 2024-11-07T17:15:26,557 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/C in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:26,557 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/bb4b96ba9b524815abe9a2323cd31e35, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b99afde49bd34d1aae0a91cf669a513c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7bc714f8851d40c58a7cc782cd47316a] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=35.9 K 2024-11-07T17:15:26,558 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb4b96ba9b524815abe9a2323cd31e35, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1730999724988 2024-11-07T17:15:26,559 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting b99afde49bd34d1aae0a91cf669a513c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1730999725643 2024-11-07T17:15:26,561 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7bc714f8851d40c58a7cc782cd47316a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1730999725985 2024-11-07T17:15:26,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/cb11707b8e9e48a3b0c3382dd6f755c5 is 50, key is test_row_0/A:col10/1730999726329/Put/seqid=0 2024-11-07T17:15:26,570 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/8a4bde71b0b34b59ad4e8bcd9521dcfd as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8a4bde71b0b34b59ad4e8bcd9521dcfd 2024-11-07T17:15:26,580 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/A of 852ea2728c497a9e191625c6cb13c906 into 8a4bde71b0b34b59ad4e8bcd9521dcfd(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:26,580 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:26,580 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/A, priority=13, startTime=1730999726470; duration=0sec 2024-11-07T17:15:26,581 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:26,581 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:A 2024-11-07T17:15:26,590 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#C#compaction#36 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:26,590 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/7d6e9ee3aa6b43aaa6c9a7a21604339d is 50, key is test_row_0/C:col10/1730999725987/Put/seqid=0 2024-11-07T17:15:26,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741874_1050 (size=12151) 2024-11-07T17:15:26,635 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/cb11707b8e9e48a3b0c3382dd6f755c5 2024-11-07T17:15:26,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741875_1051 (size=12527) 2024-11-07T17:15:26,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/a18b7db1afc145ea9e84ddcc39a538d7 is 50, key is test_row_0/B:col10/1730999726329/Put/seqid=0 2024-11-07T17:15:26,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:26,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:26,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741876_1052 (size=12151) 2024-11-07T17:15:26,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999786675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999786675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999786676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999786678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999786681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-07T17:15:26,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999786781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999786786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999786786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999786785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999786786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999786988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999786990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999786990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999786990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:26,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999786990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:26,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-07T17:15:27,047 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/7d6e9ee3aa6b43aaa6c9a7a21604339d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7d6e9ee3aa6b43aaa6c9a7a21604339d 2024-11-07T17:15:27,061 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/C of 852ea2728c497a9e191625c6cb13c906 into 7d6e9ee3aa6b43aaa6c9a7a21604339d(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
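The repeated RegionTooBusyException warnings above all carry the same figure, "Over memstore limit=512.0 K": puts are rejected once a region's memstore grows past its blocking size, which is the per-region flush size multiplied by the block multiplier. The standalone sketch below is an assumption-laden example, not the test's actual configuration; it names the two settings involved and reproduces the 512 K figure, and it runs without a cluster because it only manipulates a Configuration object.

```java
// Hedged sketch: the blocking memstore limit behind the RegionTooBusyException
// entries above is flush size x block multiplier. The values below are
// illustrative (chosen to yield 512 K), not read from the test's hbase-site.xml.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at 128 K (assumed)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x flush size
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes"); // 524288 = 512 K
    }
}
```

RegionTooBusyException is retriable from the client's point of view; in this log the rejections stop once the in-flight flushes and compactions bring the memstore back under the limit.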
2024-11-07T17:15:27,061 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:27,061 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/C, priority=13, startTime=1730999726470; duration=0sec 2024-11-07T17:15:27,062 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:27,062 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:C 2024-11-07T17:15:27,071 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/a18b7db1afc145ea9e84ddcc39a538d7 2024-11-07T17:15:27,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/91d4d8ffd5474c2aa57af84972c1c0ae is 50, key is test_row_0/C:col10/1730999726329/Put/seqid=0 2024-11-07T17:15:27,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741877_1053 (size=12151) 2024-11-07T17:15:27,120 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/91d4d8ffd5474c2aa57af84972c1c0ae 2024-11-07T17:15:27,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/cb11707b8e9e48a3b0c3382dd6f755c5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/cb11707b8e9e48a3b0c3382dd6f755c5 2024-11-07T17:15:27,145 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/cb11707b8e9e48a3b0c3382dd6f755c5, entries=150, sequenceid=192, filesize=11.9 K 2024-11-07T17:15:27,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/a18b7db1afc145ea9e84ddcc39a538d7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/a18b7db1afc145ea9e84ddcc39a538d7 2024-11-07T17:15:27,161 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/a18b7db1afc145ea9e84ddcc39a538d7, entries=150, sequenceid=192, filesize=11.9 K 2024-11-07T17:15:27,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/91d4d8ffd5474c2aa57af84972c1c0ae as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/91d4d8ffd5474c2aa57af84972c1c0ae 2024-11-07T17:15:27,177 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/91d4d8ffd5474c2aa57af84972c1c0ae, entries=150, sequenceid=192, filesize=11.9 K 2024-11-07T17:15:27,179 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 852ea2728c497a9e191625c6cb13c906 in 632ms, sequenceid=192, compaction requested=false 2024-11-07T17:15:27,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:27,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
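At this point the region flush driven by procedure pid=21 is complete (~94 KB written across the A, B and C stores at sequenceid=192), and further down the log the client starts another table flush (pid=22). As a hedged sketch of that client side only: Admin.flush(TableName) is the call behind the FlushTableProcedure entries, and the row/family/qualifier below are copied from this log, while the retry settings and the written value are illustrative assumptions rather than anything this test configures.

```java
// Hedged sketch of the client side of the flush requests recorded in this log.
// Assumes an HBase 2.x client and a reachable cluster; values marked "assumed"
// are illustrative and not taken from the test.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Puts that race a flush may see RegionTooBusyException; a larger retry
        // budget lets the client ride out the blocked-memstore window (assumed values).
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100);
        TableName name = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(name)) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            table.put(put);    // retried internally if the region is temporarily too busy
            admin.flush(name); // drives a FlushTableProcedure like pid=20/22 in the log
        }
    }
}
```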
2024-11-07T17:15:27,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-07T17:15:27,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-07T17:15:27,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-07T17:15:27,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 787 msec 2024-11-07T17:15:27,186 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 794 msec 2024-11-07T17:15:27,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:27,297 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-07T17:15:27,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:27,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:27,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:27,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:27,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:27,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:27,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/c70c6944b3e14b8e84b8bb72c3967c3f is 50, key is test_row_0/A:col10/1730999726671/Put/seqid=0 2024-11-07T17:15:27,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999787315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999787318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999787320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741878_1054 (size=12151) 2024-11-07T17:15:27,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999787325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,328 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/c70c6944b3e14b8e84b8bb72c3967c3f 2024-11-07T17:15:27,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999787325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/bfa2cc754cd64e5ebcd0e840ec853f63 is 50, key is test_row_0/B:col10/1730999726671/Put/seqid=0 2024-11-07T17:15:27,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741879_1055 (size=12151) 2024-11-07T17:15:27,356 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/bfa2cc754cd64e5ebcd0e840ec853f63 2024-11-07T17:15:27,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/e0f4ed02be4347fcbfb4f44ff65b3401 is 50, key is test_row_0/C:col10/1730999726671/Put/seqid=0 2024-11-07T17:15:27,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741880_1056 (size=12151) 2024-11-07T17:15:27,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999787426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999787428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999787428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999787428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999787429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-07T17:15:27,499 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-07T17:15:27,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:27,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-07T17:15:27,504 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:27,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-07T17:15:27,505 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:27,506 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:27,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=22 2024-11-07T17:15:27,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999787632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999787633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999787636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999787634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999787636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,658 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-07T17:15:27,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:27,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:27,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:27,661 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:27,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:27,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:27,787 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/e0f4ed02be4347fcbfb4f44ff65b3401 2024-11-07T17:15:27,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-07T17:15:27,815 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,817 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-07T17:15:27,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:27,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:27,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:27,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/c70c6944b3e14b8e84b8bb72c3967c3f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c70c6944b3e14b8e84b8bb72c3967c3f 2024-11-07T17:15:27,818 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:27,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:27,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:27,830 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c70c6944b3e14b8e84b8bb72c3967c3f, entries=150, sequenceid=216, filesize=11.9 K 2024-11-07T17:15:27,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/bfa2cc754cd64e5ebcd0e840ec853f63 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/bfa2cc754cd64e5ebcd0e840ec853f63 2024-11-07T17:15:27,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/bfa2cc754cd64e5ebcd0e840ec853f63, entries=150, sequenceid=216, filesize=11.9 K 2024-11-07T17:15:27,840 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/e0f4ed02be4347fcbfb4f44ff65b3401 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e0f4ed02be4347fcbfb4f44ff65b3401 2024-11-07T17:15:27,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e0f4ed02be4347fcbfb4f44ff65b3401, entries=150, sequenceid=216, filesize=11.9 K 2024-11-07T17:15:27,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 852ea2728c497a9e191625c6cb13c906 in 551ms, sequenceid=216, compaction requested=true 2024-11-07T17:15:27,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:27,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:15:27,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:27,849 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:27,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:27,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:27,849 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:27,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:27,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:27,851 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:27,851 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/A is initiating minor compaction (all files) 2024-11-07T17:15:27,851 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:27,851 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/A in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:27,851 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/B is initiating minor compaction (all files) 2024-11-07T17:15:27,851 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/B in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:27,851 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8a4bde71b0b34b59ad4e8bcd9521dcfd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/cb11707b8e9e48a3b0c3382dd6f755c5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c70c6944b3e14b8e84b8bb72c3967c3f] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=36.0 K 2024-11-07T17:15:27,851 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/998509a81085482cba5bcb611d836c0a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/a18b7db1afc145ea9e84ddcc39a538d7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/bfa2cc754cd64e5ebcd0e840ec853f63] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=36.0 K 2024-11-07T17:15:27,852 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a4bde71b0b34b59ad4e8bcd9521dcfd, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1730999725985 2024-11-07T17:15:27,852 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 998509a81085482cba5bcb611d836c0a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1730999725985 2024-11-07T17:15:27,853 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb11707b8e9e48a3b0c3382dd6f755c5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1730999726321 2024-11-07T17:15:27,853 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting a18b7db1afc145ea9e84ddcc39a538d7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1730999726321 2024-11-07T17:15:27,853 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting c70c6944b3e14b8e84b8bb72c3967c3f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1730999726671 2024-11-07T17:15:27,854 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting bfa2cc754cd64e5ebcd0e840ec853f63, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1730999726671 2024-11-07T17:15:27,878 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#B#compaction#42 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:27,880 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/141047d3510f42649e96065a39b1246f is 50, key is test_row_0/B:col10/1730999726671/Put/seqid=0 2024-11-07T17:15:27,881 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#A#compaction#43 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:27,882 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/a49a466b2c0b4338bab13e1728941add is 50, key is test_row_0/A:col10/1730999726671/Put/seqid=0 2024-11-07T17:15:27,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741881_1057 (size=12629) 2024-11-07T17:15:27,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741882_1058 (size=12629) 2024-11-07T17:15:27,912 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/141047d3510f42649e96065a39b1246f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/141047d3510f42649e96065a39b1246f 2024-11-07T17:15:27,913 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/a49a466b2c0b4338bab13e1728941add as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/a49a466b2c0b4338bab13e1728941add 2024-11-07T17:15:27,926 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/A of 852ea2728c497a9e191625c6cb13c906 into a49a466b2c0b4338bab13e1728941add(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:27,927 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:27,927 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/A, priority=13, startTime=1730999727848; duration=0sec 2024-11-07T17:15:27,927 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:27,927 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:A 2024-11-07T17:15:27,927 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:27,927 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/B of 852ea2728c497a9e191625c6cb13c906 into 141047d3510f42649e96065a39b1246f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:27,927 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:27,927 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/B, priority=13, startTime=1730999727849; duration=0sec 2024-11-07T17:15:27,928 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:27,928 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:B 2024-11-07T17:15:27,929 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:27,929 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/C is initiating minor compaction (all files) 2024-11-07T17:15:27,929 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/C in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:27,930 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7d6e9ee3aa6b43aaa6c9a7a21604339d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/91d4d8ffd5474c2aa57af84972c1c0ae, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e0f4ed02be4347fcbfb4f44ff65b3401] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=36.0 K 2024-11-07T17:15:27,930 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d6e9ee3aa6b43aaa6c9a7a21604339d, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1730999725985 2024-11-07T17:15:27,931 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91d4d8ffd5474c2aa57af84972c1c0ae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1730999726321 2024-11-07T17:15:27,932 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0f4ed02be4347fcbfb4f44ff65b3401, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1730999726671 2024-11-07T17:15:27,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:27,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-07T17:15:27,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:27,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:27,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:27,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:27,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:27,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:27,961 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#C#compaction#44 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:27,962 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/800932983a914ffc885ee6a6174c756f is 50, key is test_row_0/C:col10/1730999726671/Put/seqid=0 2024-11-07T17:15:27,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/7c8d4867859c4155851a398670e3edf0 is 50, key is test_row_0/A:col10/1730999727941/Put/seqid=0 2024-11-07T17:15:27,971 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-07T17:15:27,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:27,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:27,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:27,972 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:27,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:27,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:27,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999787966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999787967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999787972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999787972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:27,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999787975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:27,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741883_1059 (size=12629) 2024-11-07T17:15:27,999 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/800932983a914ffc885ee6a6174c756f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/800932983a914ffc885ee6a6174c756f 2024-11-07T17:15:28,008 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/C of 852ea2728c497a9e191625c6cb13c906 into 800932983a914ffc885ee6a6174c756f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:28,008 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:28,008 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/C, priority=13, startTime=1730999727849; duration=0sec 2024-11-07T17:15:28,008 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:28,008 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:C 2024-11-07T17:15:28,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741884_1060 (size=14541) 2024-11-07T17:15:28,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:28,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999788077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:28,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999788077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:28,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999788078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:28,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999788078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:28,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999788079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-07T17:15:28,127 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-07T17:15:28,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:28,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:28,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:28,128 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:28,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:28,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:28,282 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:28,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999788281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-07T17:15:28,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:28,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:28,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:28,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:28,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999788283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:28,284 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:28,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:28,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:28,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999788283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999788282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:28,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:28,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999788287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,418 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/7c8d4867859c4155851a398670e3edf0 2024-11-07T17:15:28,437 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/65c367a95c674ca9938cda40fa220a19 is 50, key is test_row_0/B:col10/1730999727941/Put/seqid=0 2024-11-07T17:15:28,437 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-07T17:15:28,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:28,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:28,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:28,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:28,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:28,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:28,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741885_1061 (size=12151) 2024-11-07T17:15:28,499 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/65c367a95c674ca9938cda40fa220a19 2024-11-07T17:15:28,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/667fee38343a4a4391585419bed85ca7 is 50, key is test_row_0/C:col10/1730999727941/Put/seqid=0 2024-11-07T17:15:28,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741886_1062 (size=12151) 2024-11-07T17:15:28,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/667fee38343a4a4391585419bed85ca7 2024-11-07T17:15:28,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/7c8d4867859c4155851a398670e3edf0 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/7c8d4867859c4155851a398670e3edf0 2024-11-07T17:15:28,591 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:28,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999788587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:28,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999788588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:28,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999788589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:28,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999788590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,594 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-07T17:15:28,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:28,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:28,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:28,595 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:28,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:28,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:28,598 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/7c8d4867859c4155851a398670e3edf0, entries=200, sequenceid=235, filesize=14.2 K 2024-11-07T17:15:28,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:28,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999788592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/65c367a95c674ca9938cda40fa220a19 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/65c367a95c674ca9938cda40fa220a19 2024-11-07T17:15:28,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/65c367a95c674ca9938cda40fa220a19, entries=150, sequenceid=235, filesize=11.9 K 2024-11-07T17:15:28,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/667fee38343a4a4391585419bed85ca7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/667fee38343a4a4391585419bed85ca7 2024-11-07T17:15:28,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/667fee38343a4a4391585419bed85ca7, entries=150, sequenceid=235, filesize=11.9 K 2024-11-07T17:15:28,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-07T17:15:28,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 852ea2728c497a9e191625c6cb13c906 in 677ms, sequenceid=235, compaction requested=false 2024-11-07T17:15:28,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:28,749 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:28,750 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 
2024-11-07T17:15:28,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:28,750 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-07T17:15:28,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:28,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:28,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:28,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:28,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:28,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:28,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/0299fd4d7eab4e399a29037afdfcdd8e is 50, key is test_row_0/A:col10/1730999727971/Put/seqid=0 2024-11-07T17:15:28,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741887_1063 (size=12151) 2024-11-07T17:15:28,783 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/0299fd4d7eab4e399a29037afdfcdd8e 2024-11-07T17:15:28,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/9a131fd12049464dae1436ac87f775a7 is 50, key is test_row_0/B:col10/1730999727971/Put/seqid=0 2024-11-07T17:15:28,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741888_1064 (size=12151) 2024-11-07T17:15:29,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:29,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:29,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999789115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999789117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999789118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999789118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999789120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999789224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999789223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999789224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999789224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999789225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,230 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/9a131fd12049464dae1436ac87f775a7 2024-11-07T17:15:29,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/71019d55740a49e8a0468bf0166f8ebe is 50, key is test_row_0/C:col10/1730999727971/Put/seqid=0 2024-11-07T17:15:29,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741889_1065 (size=12151) 2024-11-07T17:15:29,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999789429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999789429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999789430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999789430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999789431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-07T17:15:29,647 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/71019d55740a49e8a0468bf0166f8ebe 2024-11-07T17:15:29,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/0299fd4d7eab4e399a29037afdfcdd8e as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/0299fd4d7eab4e399a29037afdfcdd8e 2024-11-07T17:15:29,664 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/0299fd4d7eab4e399a29037afdfcdd8e, entries=150, sequenceid=255, filesize=11.9 K 2024-11-07T17:15:29,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/9a131fd12049464dae1436ac87f775a7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/9a131fd12049464dae1436ac87f775a7 2024-11-07T17:15:29,674 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/9a131fd12049464dae1436ac87f775a7, entries=150, sequenceid=255, filesize=11.9 K 2024-11-07T17:15:29,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/71019d55740a49e8a0468bf0166f8ebe as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/71019d55740a49e8a0468bf0166f8ebe 2024-11-07T17:15:29,681 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/71019d55740a49e8a0468bf0166f8ebe, entries=150, sequenceid=255, filesize=11.9 K 2024-11-07T17:15:29,683 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 852ea2728c497a9e191625c6cb13c906 in 933ms, sequenceid=255, compaction requested=true 2024-11-07T17:15:29,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:29,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:29,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-07T17:15:29,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-07T17:15:29,689 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-07T17:15:29,689 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1800 sec 2024-11-07T17:15:29,691 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 2.1880 sec 2024-11-07T17:15:29,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:29,736 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-07T17:15:29,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:29,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:29,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:29,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:29,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:29,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:29,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/1ddf3ed330404dc1a312f65513a446cb is 50, key is test_row_0/A:col10/1730999729736/Put/seqid=0 2024-11-07T17:15:29,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741890_1066 (size=12301) 2024-11-07T17:15:29,753 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/1ddf3ed330404dc1a312f65513a446cb 2024-11-07T17:15:29,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999789755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999789757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999789758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999789759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999789760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,778 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/5c77a2b80dc74dc0ba35cf45d1657b5c is 50, key is test_row_0/B:col10/1730999729736/Put/seqid=0 2024-11-07T17:15:29,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741891_1067 (size=12301) 2024-11-07T17:15:29,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999789862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999789864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999789864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999789865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:29,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:29,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999789865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:30,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999790065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:30,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999790068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:30,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999790069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:30,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999790071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:30,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999790071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,204 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/5c77a2b80dc74dc0ba35cf45d1657b5c 2024-11-07T17:15:30,220 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/14bd30a5788a40b7b938081ac903703d is 50, key is test_row_0/C:col10/1730999729736/Put/seqid=0 2024-11-07T17:15:30,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741892_1068 (size=12301) 2024-11-07T17:15:30,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:30,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999790371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:30,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999790371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:30,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999790376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:30,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999790377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:30,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999790378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,640 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/14bd30a5788a40b7b938081ac903703d 2024-11-07T17:15:30,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/1ddf3ed330404dc1a312f65513a446cb as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/1ddf3ed330404dc1a312f65513a446cb 2024-11-07T17:15:30,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/1ddf3ed330404dc1a312f65513a446cb, entries=150, sequenceid=274, filesize=12.0 K 2024-11-07T17:15:30,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/5c77a2b80dc74dc0ba35cf45d1657b5c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/5c77a2b80dc74dc0ba35cf45d1657b5c 2024-11-07T17:15:30,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/5c77a2b80dc74dc0ba35cf45d1657b5c, entries=150, sequenceid=274, filesize=12.0 K 2024-11-07T17:15:30,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/14bd30a5788a40b7b938081ac903703d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/14bd30a5788a40b7b938081ac903703d 2024-11-07T17:15:30,672 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/14bd30a5788a40b7b938081ac903703d, entries=150, sequenceid=274, filesize=12.0 K 2024-11-07T17:15:30,673 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 852ea2728c497a9e191625c6cb13c906 in 937ms, sequenceid=274, compaction requested=true 2024-11-07T17:15:30,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:30,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:15:30,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:30,673 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:30,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:30,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:30,674 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:30,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:30,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:30,675 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49232 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:30,675 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/B is initiating minor 
compaction (all files) 2024-11-07T17:15:30,675 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/B in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:30,676 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/141047d3510f42649e96065a39b1246f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/65c367a95c674ca9938cda40fa220a19, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/9a131fd12049464dae1436ac87f775a7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/5c77a2b80dc74dc0ba35cf45d1657b5c] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=48.1 K 2024-11-07T17:15:30,676 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51622 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:30,676 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/A is initiating minor compaction (all files) 2024-11-07T17:15:30,676 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/A in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:30,676 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/a49a466b2c0b4338bab13e1728941add, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/7c8d4867859c4155851a398670e3edf0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/0299fd4d7eab4e399a29037afdfcdd8e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/1ddf3ed330404dc1a312f65513a446cb] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=50.4 K 2024-11-07T17:15:30,677 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 141047d3510f42649e96065a39b1246f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1730999726671 2024-11-07T17:15:30,677 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting a49a466b2c0b4338bab13e1728941add, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1730999726671 2024-11-07T17:15:30,677 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 65c367a95c674ca9938cda40fa220a19, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1730999727317 2024-11-07T17:15:30,677 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c8d4867859c4155851a398670e3edf0, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1730999727317 2024-11-07T17:15:30,678 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0299fd4d7eab4e399a29037afdfcdd8e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1730999727957 2024-11-07T17:15:30,678 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a131fd12049464dae1436ac87f775a7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1730999727957 2024-11-07T17:15:30,679 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ddf3ed330404dc1a312f65513a446cb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1730999729112 2024-11-07T17:15:30,679 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c77a2b80dc74dc0ba35cf45d1657b5c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1730999729112 2024-11-07T17:15:30,700 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#A#compaction#54 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:30,702 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/816c63a6740a45628038fddbef43837c is 50, key is test_row_0/A:col10/1730999729736/Put/seqid=0 2024-11-07T17:15:30,707 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#B#compaction#55 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:30,708 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/4060fadf9395459db992d3efa5fed97e is 50, key is test_row_0/B:col10/1730999729736/Put/seqid=0 2024-11-07T17:15:30,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741893_1069 (size=12915) 2024-11-07T17:15:30,734 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/816c63a6740a45628038fddbef43837c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/816c63a6740a45628038fddbef43837c 2024-11-07T17:15:30,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741894_1070 (size=12915) 2024-11-07T17:15:30,744 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/4060fadf9395459db992d3efa5fed97e as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/4060fadf9395459db992d3efa5fed97e 2024-11-07T17:15:30,744 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/A of 852ea2728c497a9e191625c6cb13c906 into 816c63a6740a45628038fddbef43837c(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:30,745 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:30,745 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/A, priority=12, startTime=1730999730673; duration=0sec 2024-11-07T17:15:30,745 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:30,745 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:A 2024-11-07T17:15:30,745 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:30,748 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49232 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:30,749 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/C is initiating minor compaction (all files) 2024-11-07T17:15:30,749 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/C in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:30,749 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/800932983a914ffc885ee6a6174c756f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/667fee38343a4a4391585419bed85ca7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/71019d55740a49e8a0468bf0166f8ebe, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/14bd30a5788a40b7b938081ac903703d] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=48.1 K 2024-11-07T17:15:30,749 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 800932983a914ffc885ee6a6174c756f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1730999726671 2024-11-07T17:15:30,750 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 667fee38343a4a4391585419bed85ca7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1730999727317 2024-11-07T17:15:30,752 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71019d55740a49e8a0468bf0166f8ebe, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1730999727957 2024-11-07T17:15:30,753 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14bd30a5788a40b7b938081ac903703d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1730999729112 2024-11-07T17:15:30,756 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/B of 852ea2728c497a9e191625c6cb13c906 into 4060fadf9395459db992d3efa5fed97e(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:30,756 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:30,756 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/B, priority=12, startTime=1730999730673; duration=0sec 2024-11-07T17:15:30,756 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:30,756 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:B 2024-11-07T17:15:30,777 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#C#compaction#56 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:30,778 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b525123669dd4360a85cb5d7e875d6ea is 50, key is test_row_0/C:col10/1730999729736/Put/seqid=0 2024-11-07T17:15:30,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741895_1071 (size=12915) 2024-11-07T17:15:30,810 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b525123669dd4360a85cb5d7e875d6ea as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b525123669dd4360a85cb5d7e875d6ea 2024-11-07T17:15:30,825 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/C of 852ea2728c497a9e191625c6cb13c906 into b525123669dd4360a85cb5d7e875d6ea(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:30,825 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:30,825 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/C, priority=12, startTime=1730999730674; duration=0sec 2024-11-07T17:15:30,825 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:30,825 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:C 2024-11-07T17:15:30,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:30,879 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-07T17:15:30,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:30,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:30,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:30,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:30,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:30,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:30,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/9e0f574e7eb446fdb0d6006cab96bbaa is 50, key is test_row_0/A:col10/1730999730877/Put/seqid=0 2024-11-07T17:15:30,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:30,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999790907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:30,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999790907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:30,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999790908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:30,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999790909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:30,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999790911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:30,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741896_1072 (size=12301) 2024-11-07T17:15:31,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999791012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999791012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999791013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999791013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999791015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999791215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999791216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999791218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999791219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999791219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,316 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/9e0f574e7eb446fdb0d6006cab96bbaa 2024-11-07T17:15:31,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/301a2eb2ddf147288c7fe66e577d50ff is 50, key is test_row_0/B:col10/1730999730877/Put/seqid=0 2024-11-07T17:15:31,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741897_1073 (size=12301) 2024-11-07T17:15:31,337 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/301a2eb2ddf147288c7fe66e577d50ff 2024-11-07T17:15:31,348 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/6a44ebdf3c4b415eafadf8154103b8bf is 50, key is test_row_0/C:col10/1730999730877/Put/seqid=0 2024-11-07T17:15:31,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741898_1074 (size=12301) 
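The run of RegionTooBusyException warnings above is the region blocking new writes once its memstore crosses the 512.0 K blocking limit while MemStoreFlusher drains it. The sketch below is a minimal, illustrative client-side reaction and is not part of the test: it assumes client-level retries are dialed down so the exception reaches the caller directly (by default the HBase client retries internally and surfaces a retries-exhausted exception instead), and the table, family, row and backoff values are taken from the log only for flavor.

// Illustrative sketch: back off and retry a put when the region reports it is too busy.
// Assumes hbase-site.xml is on the classpath and client retries are reduced so
// RegionTooBusyException propagates to this code.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted once the flush has freed memstore space
                } catch (RegionTooBusyException e) {
                    // Memstore is over its blocking limit; wait and retry with backoff.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}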
2024-11-07T17:15:31,361 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/6a44ebdf3c4b415eafadf8154103b8bf 2024-11-07T17:15:31,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/9e0f574e7eb446fdb0d6006cab96bbaa as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/9e0f574e7eb446fdb0d6006cab96bbaa 2024-11-07T17:15:31,375 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/9e0f574e7eb446fdb0d6006cab96bbaa, entries=150, sequenceid=295, filesize=12.0 K 2024-11-07T17:15:31,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/301a2eb2ddf147288c7fe66e577d50ff as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/301a2eb2ddf147288c7fe66e577d50ff 2024-11-07T17:15:31,384 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/301a2eb2ddf147288c7fe66e577d50ff, entries=150, sequenceid=295, filesize=12.0 K 2024-11-07T17:15:31,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/6a44ebdf3c4b415eafadf8154103b8bf as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/6a44ebdf3c4b415eafadf8154103b8bf 2024-11-07T17:15:31,394 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/6a44ebdf3c4b415eafadf8154103b8bf, entries=150, sequenceid=295, filesize=12.0 K 2024-11-07T17:15:31,395 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 852ea2728c497a9e191625c6cb13c906 in 516ms, sequenceid=295, compaction requested=false 2024-11-07T17:15:31,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:31,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:31,524 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=107.34 KB heapSize=282 KB 
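The flush recorded here, and the table-level flush procedure (pid=24 for TestAcidGuarantees) that appears further down, correspond to the kind of flush a client can request explicitly through the Admin API. A minimal sketch under the assumption of a standard client Connection; Admin.flush(TableName) is the only call that matters:

// Illustrative sketch: ask the master to flush every region of the table,
// which shows up server-side as a FlushTableProcedure like pid=24 in this log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequester {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}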
2024-11-07T17:15:31,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:31,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:31,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:31,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:31,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:31,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:31,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/b955d5b6f05c4553a3541bd89f955b17 is 50, key is test_row_0/A:col10/1730999731524/Put/seqid=0 2024-11-07T17:15:31,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999791536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999791539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999791539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741899_1075 (size=14741) 2024-11-07T17:15:31,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/b955d5b6f05c4553a3541bd89f955b17 2024-11-07T17:15:31,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999791541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999791541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/05d5250386ae4cf08606318a432cc60f is 50, key is test_row_0/B:col10/1730999731524/Put/seqid=0 2024-11-07T17:15:31,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741900_1076 (size=12301) 2024-11-07T17:15:31,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/05d5250386ae4cf08606318a432cc60f 2024-11-07T17:15:31,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/eba14396b66c484fafe83a1e619519a5 is 50, key is test_row_0/C:col10/1730999731524/Put/seqid=0 2024-11-07T17:15:31,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-07T17:15:31,617 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-07T17:15:31,619 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:31,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-07T17:15:31,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-07T17:15:31,622 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:31,623 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:31,623 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:31,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741901_1077 (size=12301) 2024-11-07T17:15:31,632 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/eba14396b66c484fafe83a1e619519a5 2024-11-07T17:15:31,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/b955d5b6f05c4553a3541bd89f955b17 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/b955d5b6f05c4553a3541bd89f955b17 2024-11-07T17:15:31,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999791643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999791644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999791643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,648 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999791645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999791645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,650 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/b955d5b6f05c4553a3541bd89f955b17, entries=200, sequenceid=315, filesize=14.4 K 2024-11-07T17:15:31,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/05d5250386ae4cf08606318a432cc60f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/05d5250386ae4cf08606318a432cc60f 2024-11-07T17:15:31,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/05d5250386ae4cf08606318a432cc60f, entries=150, sequenceid=315, filesize=12.0 K 2024-11-07T17:15:31,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/eba14396b66c484fafe83a1e619519a5 as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/eba14396b66c484fafe83a1e619519a5 2024-11-07T17:15:31,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/eba14396b66c484fafe83a1e619519a5, entries=150, sequenceid=315, filesize=12.0 K 2024-11-07T17:15:31,675 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 852ea2728c497a9e191625c6cb13c906 in 151ms, sequenceid=315, compaction requested=true 2024-11-07T17:15:31,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:31,676 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:31,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:15:31,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:31,677 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:31,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:31,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:31,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:31,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:31,679 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39957 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:31,679 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:31,679 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/B is initiating minor compaction (all files) 2024-11-07T17:15:31,679 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/A is initiating minor compaction (all files) 2024-11-07T17:15:31,679 INFO 
[RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/A in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:31,679 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/B in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:31,679 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/816c63a6740a45628038fddbef43837c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/9e0f574e7eb446fdb0d6006cab96bbaa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/b955d5b6f05c4553a3541bd89f955b17] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=39.0 K 2024-11-07T17:15:31,679 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/4060fadf9395459db992d3efa5fed97e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/301a2eb2ddf147288c7fe66e577d50ff, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/05d5250386ae4cf08606318a432cc60f] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=36.6 K 2024-11-07T17:15:31,680 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 816c63a6740a45628038fddbef43837c, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1730999729112 2024-11-07T17:15:31,680 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 4060fadf9395459db992d3efa5fed97e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1730999729112 2024-11-07T17:15:31,681 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e0f574e7eb446fdb0d6006cab96bbaa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1730999729757 2024-11-07T17:15:31,681 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 301a2eb2ddf147288c7fe66e577d50ff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1730999729757 2024-11-07T17:15:31,681 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting b955d5b6f05c4553a3541bd89f955b17, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1730999730900 2024-11-07T17:15:31,681 DEBUG 
[RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 05d5250386ae4cf08606318a432cc60f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1730999730907 2024-11-07T17:15:31,702 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#B#compaction#63 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:31,702 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#A#compaction#64 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:31,707 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/ccfd052970e04f5482d5ceeb51e3af21 is 50, key is test_row_0/B:col10/1730999731524/Put/seqid=0 2024-11-07T17:15:31,707 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/2a4dba6a571d4081bf026c68b39393cf is 50, key is test_row_0/A:col10/1730999731524/Put/seqid=0 2024-11-07T17:15:31,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-07T17:15:31,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741903_1079 (size=13017) 2024-11-07T17:15:31,737 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/ccfd052970e04f5482d5ceeb51e3af21 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ccfd052970e04f5482d5ceeb51e3af21 2024-11-07T17:15:31,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741902_1078 (size=13017) 2024-11-07T17:15:31,749 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/B of 852ea2728c497a9e191625c6cb13c906 into ccfd052970e04f5482d5ceeb51e3af21(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:31,749 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:31,749 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/B, priority=13, startTime=1730999731677; duration=0sec 2024-11-07T17:15:31,749 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:31,749 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:B 2024-11-07T17:15:31,750 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:31,752 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/2a4dba6a571d4081bf026c68b39393cf as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/2a4dba6a571d4081bf026c68b39393cf 2024-11-07T17:15:31,753 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:31,753 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/C is initiating minor compaction (all files) 2024-11-07T17:15:31,753 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/C in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:31,754 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b525123669dd4360a85cb5d7e875d6ea, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/6a44ebdf3c4b415eafadf8154103b8bf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/eba14396b66c484fafe83a1e619519a5] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=36.6 K 2024-11-07T17:15:31,754 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting b525123669dd4360a85cb5d7e875d6ea, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1730999729112 2024-11-07T17:15:31,755 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a44ebdf3c4b415eafadf8154103b8bf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1730999729757 2024-11-07T17:15:31,756 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting eba14396b66c484fafe83a1e619519a5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1730999730907 2024-11-07T17:15:31,760 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/A of 852ea2728c497a9e191625c6cb13c906 into 2a4dba6a571d4081bf026c68b39393cf(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:31,760 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:31,760 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/A, priority=13, startTime=1730999731676; duration=0sec 2024-11-07T17:15:31,760 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:31,760 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:A 2024-11-07T17:15:31,777 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-07T17:15:31,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:31,778 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-07T17:15:31,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:31,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:31,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:31,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:31,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:31,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:31,782 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#C#compaction#65 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:31,785 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/663f4e8aaa7b4cf3807429a11cf84ccb is 50, key is test_row_0/C:col10/1730999731524/Put/seqid=0 2024-11-07T17:15:31,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/b302196dcf754a26aa968d4fb81beec1 is 50, key is test_row_0/A:col10/1730999731539/Put/seqid=0 2024-11-07T17:15:31,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741904_1080 (size=12301) 2024-11-07T17:15:31,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741905_1081 (size=13017) 2024-11-07T17:15:31,846 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/663f4e8aaa7b4cf3807429a11cf84ccb as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/663f4e8aaa7b4cf3807429a11cf84ccb 2024-11-07T17:15:31,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:31,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:31,857 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/C of 852ea2728c497a9e191625c6cb13c906 into 663f4e8aaa7b4cf3807429a11cf84ccb(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:31,857 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:31,857 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/C, priority=13, startTime=1730999731677; duration=0sec 2024-11-07T17:15:31,857 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:31,857 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:C 2024-11-07T17:15:31,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999791877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999791878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999791881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999791882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999791883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-07T17:15:31,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999791984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999791985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999791987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999791996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:31,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:31,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999791998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:32,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:32,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999792188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:32,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:32,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999792190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:32,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:32,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999792191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:32,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:32,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999792199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:32,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:32,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999792199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:32,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-07T17:15:32,231 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/b302196dcf754a26aa968d4fb81beec1 2024-11-07T17:15:32,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/bb2d25087f0c471faef5172ab1d04d6c is 50, key is test_row_0/B:col10/1730999731539/Put/seqid=0 2024-11-07T17:15:32,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741906_1082 (size=12301) 2024-11-07T17:15:32,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:32,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999792491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:32,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:32,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999792493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:32,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:32,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999792495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:32,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:32,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999792502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:32,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:32,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999792503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:32,651 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/bb2d25087f0c471faef5172ab1d04d6c 2024-11-07T17:15:32,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b6e8dbc92cc846578a6ff97b421fd43a is 50, key is test_row_0/C:col10/1730999731539/Put/seqid=0 2024-11-07T17:15:32,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741907_1083 (size=12301) 2024-11-07T17:15:32,680 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b6e8dbc92cc846578a6ff97b421fd43a 2024-11-07T17:15:32,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/b302196dcf754a26aa968d4fb81beec1 as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/b302196dcf754a26aa968d4fb81beec1 2024-11-07T17:15:32,705 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/b302196dcf754a26aa968d4fb81beec1, entries=150, sequenceid=333, filesize=12.0 K 2024-11-07T17:15:32,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/bb2d25087f0c471faef5172ab1d04d6c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/bb2d25087f0c471faef5172ab1d04d6c 2024-11-07T17:15:32,721 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/bb2d25087f0c471faef5172ab1d04d6c, entries=150, sequenceid=333, filesize=12.0 K 2024-11-07T17:15:32,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b6e8dbc92cc846578a6ff97b421fd43a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b6e8dbc92cc846578a6ff97b421fd43a 2024-11-07T17:15:32,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-07T17:15:32,731 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b6e8dbc92cc846578a6ff97b421fd43a, entries=150, sequenceid=333, filesize=12.0 K 2024-11-07T17:15:32,732 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 852ea2728c497a9e191625c6cb13c906 in 954ms, sequenceid=333, compaction requested=false 2024-11-07T17:15:32,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:32,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:32,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-07T17:15:32,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-07T17:15:32,736 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-07T17:15:32,736 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1110 sec 2024-11-07T17:15:32,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.1180 sec 2024-11-07T17:15:32,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:32,999 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-07T17:15:32,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:32,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:32,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:32,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:32,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:32,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:33,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/711eaedab86244f19b70cfdfd8a49985 is 50, key is test_row_0/A:col10/1730999731874/Put/seqid=0 2024-11-07T17:15:33,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999793014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999793014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999793016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999793018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999793016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741908_1084 (size=14741) 2024-11-07T17:15:33,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/711eaedab86244f19b70cfdfd8a49985 2024-11-07T17:15:33,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/ec79e4d2e1d442f09e5d172680b02b78 is 50, key is test_row_0/B:col10/1730999731874/Put/seqid=0 2024-11-07T17:15:33,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741909_1085 (size=12301) 2024-11-07T17:15:33,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999793121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999793121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999793124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999793124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999793128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999793323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999793324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999793328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999793330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999793331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/ec79e4d2e1d442f09e5d172680b02b78 2024-11-07T17:15:33,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/7024083885f04a82afad72034ecc08c7 is 50, key is test_row_0/C:col10/1730999731874/Put/seqid=0 2024-11-07T17:15:33,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741910_1086 (size=12301) 2024-11-07T17:15:33,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999793627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999793628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999793632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999793632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:33,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999793632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-07T17:15:33,727 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-07T17:15:33,729 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:33,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-07T17:15:33,734 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:33,735 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:33,735 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:33,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-07T17:15:33,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=26 2024-11-07T17:15:33,889 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:33,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-07T17:15:33,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:33,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:33,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:33,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:33,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:33,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:33,924 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/7024083885f04a82afad72034ecc08c7 2024-11-07T17:15:33,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/711eaedab86244f19b70cfdfd8a49985 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/711eaedab86244f19b70cfdfd8a49985 2024-11-07T17:15:33,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/711eaedab86244f19b70cfdfd8a49985, entries=200, sequenceid=356, filesize=14.4 K 2024-11-07T17:15:33,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/ec79e4d2e1d442f09e5d172680b02b78 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ec79e4d2e1d442f09e5d172680b02b78 2024-11-07T17:15:33,948 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ec79e4d2e1d442f09e5d172680b02b78, entries=150, sequenceid=356, filesize=12.0 K 2024-11-07T17:15:33,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/7024083885f04a82afad72034ecc08c7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7024083885f04a82afad72034ecc08c7 2024-11-07T17:15:33,960 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7024083885f04a82afad72034ecc08c7, entries=150, sequenceid=356, filesize=12.0 K 2024-11-07T17:15:33,961 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 852ea2728c497a9e191625c6cb13c906 in 963ms, sequenceid=356, compaction requested=true 2024-11-07T17:15:33,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:33,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:15:33,962 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-11-07T17:15:33,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:33,962 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:33,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:33,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:33,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:33,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:33,963 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40059 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:33,963 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/A is initiating minor compaction (all files) 2024-11-07T17:15:33,963 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/A in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:33,963 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/2a4dba6a571d4081bf026c68b39393cf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/b302196dcf754a26aa968d4fb81beec1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/711eaedab86244f19b70cfdfd8a49985] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=39.1 K 2024-11-07T17:15:33,964 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:33,964 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/B is initiating minor compaction (all files) 2024-11-07T17:15:33,964 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a4dba6a571d4081bf026c68b39393cf, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1730999730907 2024-11-07T17:15:33,964 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/B in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:33,964 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ccfd052970e04f5482d5ceeb51e3af21, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/bb2d25087f0c471faef5172ab1d04d6c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ec79e4d2e1d442f09e5d172680b02b78] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=36.7 K 2024-11-07T17:15:33,964 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting b302196dcf754a26aa968d4fb81beec1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1730999731531 2024-11-07T17:15:33,966 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 711eaedab86244f19b70cfdfd8a49985, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1730999731874 2024-11-07T17:15:33,966 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting ccfd052970e04f5482d5ceeb51e3af21, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1730999730907 2024-11-07T17:15:33,967 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting bb2d25087f0c471faef5172ab1d04d6c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1730999731531 2024-11-07T17:15:33,968 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting ec79e4d2e1d442f09e5d172680b02b78, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1730999731874 2024-11-07T17:15:33,983 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#A#compaction#72 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:33,983 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/c3fa624f740549cf8ad067a416f47da7 is 50, key is test_row_0/A:col10/1730999731874/Put/seqid=0 2024-11-07T17:15:33,986 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#B#compaction#73 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:33,987 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/4405b68c67104189bea93839c28bd1bd is 50, key is test_row_0/B:col10/1730999731874/Put/seqid=0 2024-11-07T17:15:33,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741912_1088 (size=13119) 2024-11-07T17:15:34,009 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/4405b68c67104189bea93839c28bd1bd as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/4405b68c67104189bea93839c28bd1bd 2024-11-07T17:15:34,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741911_1087 (size=13119) 2024-11-07T17:15:34,021 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/c3fa624f740549cf8ad067a416f47da7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c3fa624f740549cf8ad067a416f47da7 2024-11-07T17:15:34,021 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/B of 852ea2728c497a9e191625c6cb13c906 into 4405b68c67104189bea93839c28bd1bd(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:34,021 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:34,022 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/B, priority=13, startTime=1730999733962; duration=0sec 2024-11-07T17:15:34,022 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:34,022 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:B 2024-11-07T17:15:34,022 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:34,023 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:34,024 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/C is initiating minor compaction (all files) 2024-11-07T17:15:34,024 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/C in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:34,024 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/663f4e8aaa7b4cf3807429a11cf84ccb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b6e8dbc92cc846578a6ff97b421fd43a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7024083885f04a82afad72034ecc08c7] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=36.7 K 2024-11-07T17:15:34,025 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 663f4e8aaa7b4cf3807429a11cf84ccb, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1730999730907 2024-11-07T17:15:34,026 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting b6e8dbc92cc846578a6ff97b421fd43a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1730999731531 2024-11-07T17:15:34,027 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 7024083885f04a82afad72034ecc08c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1730999731874 2024-11-07T17:15:34,029 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in 852ea2728c497a9e191625c6cb13c906/A of 852ea2728c497a9e191625c6cb13c906 into c3fa624f740549cf8ad067a416f47da7(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:34,030 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:34,030 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/A, priority=13, startTime=1730999733962; duration=0sec 2024-11-07T17:15:34,030 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:34,030 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:A 2024-11-07T17:15:34,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-07T17:15:34,040 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#C#compaction#74 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:34,041 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/c6e85087fa094976a98cfa4af51c7a64 is 50, key is test_row_0/C:col10/1730999731874/Put/seqid=0 2024-11-07T17:15:34,044 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-07T17:15:34,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:34,046 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T17:15:34,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:34,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:34,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:34,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:34,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:34,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:34,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/11c10215d41f4cd48dfc4fc69219c01e is 50, key is test_row_0/A:col10/1730999733016/Put/seqid=0 2024-11-07T17:15:34,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741913_1089 (size=13119) 2024-11-07T17:15:34,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741914_1090 (size=12301) 2024-11-07T17:15:34,091 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/11c10215d41f4cd48dfc4fc69219c01e 2024-11-07T17:15:34,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/82a334885ef14a159027299cbcf79e0e is 50, key is test_row_0/B:col10/1730999733016/Put/seqid=0 2024-11-07T17:15:34,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741915_1091 (size=12301) 2024-11-07T17:15:34,113 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=372 
(bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/82a334885ef14a159027299cbcf79e0e 2024-11-07T17:15:34,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/e3358951dd2d4774a324af1a5e0d41f5 is 50, key is test_row_0/C:col10/1730999733016/Put/seqid=0 2024-11-07T17:15:34,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741916_1092 (size=12301) 2024-11-07T17:15:34,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:34,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:34,136 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/e3358951dd2d4774a324af1a5e0d41f5 2024-11-07T17:15:34,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/11c10215d41f4cd48dfc4fc69219c01e as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/11c10215d41f4cd48dfc4fc69219c01e 2024-11-07T17:15:34,153 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/11c10215d41f4cd48dfc4fc69219c01e, entries=150, sequenceid=372, filesize=12.0 K 2024-11-07T17:15:34,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/82a334885ef14a159027299cbcf79e0e as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/82a334885ef14a159027299cbcf79e0e 2024-11-07T17:15:34,163 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/82a334885ef14a159027299cbcf79e0e, entries=150, sequenceid=372, filesize=12.0 K 2024-11-07T17:15:34,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 
{event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/e3358951dd2d4774a324af1a5e0d41f5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e3358951dd2d4774a324af1a5e0d41f5 2024-11-07T17:15:34,170 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e3358951dd2d4774a324af1a5e0d41f5, entries=150, sequenceid=372, filesize=12.0 K 2024-11-07T17:15:34,209 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=120.76 KB/123660 for 852ea2728c497a9e191625c6cb13c906 in 163ms, sequenceid=372, compaction requested=false 2024-11-07T17:15:34,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:34,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:34,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-07T17:15:34,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:34,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T17:15:34,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-07T17:15:34,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:34,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:34,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:34,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:34,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:34,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:34,218 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-07T17:15:34,218 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 479 msec 2024-11-07T17:15:34,218 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/64446b71e2944ab2982013a54b00372b is 50, key is test_row_0/A:col10/1730999734210/Put/seqid=0 2024-11-07T17:15:34,223 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 490 msec 2024-11-07T17:15:34,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741917_1093 (size=17181) 2024-11-07T17:15:34,226 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/64446b71e2944ab2982013a54b00372b 2024-11-07T17:15:34,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999794222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999794222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999794222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999794223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999794224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/ed1ea9c826ae43e9810adafe62e5809a is 50, key is test_row_0/B:col10/1730999734210/Put/seqid=0 2024-11-07T17:15:34,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741918_1094 (size=12301) 2024-11-07T17:15:34,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/ed1ea9c826ae43e9810adafe62e5809a 2024-11-07T17:15:34,270 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/4cc4bdbc000b45fbbf8f32d257ee2b02 is 50, key is test_row_0/C:col10/1730999734210/Put/seqid=0 2024-11-07T17:15:34,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741919_1095 (size=12301) 2024-11-07T17:15:34,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/4cc4bdbc000b45fbbf8f32d257ee2b02 2024-11-07T17:15:34,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/64446b71e2944ab2982013a54b00372b as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/64446b71e2944ab2982013a54b00372b 2024-11-07T17:15:34,319 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/64446b71e2944ab2982013a54b00372b, entries=250, sequenceid=398, filesize=16.8 K 2024-11-07T17:15:34,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/ed1ea9c826ae43e9810adafe62e5809a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ed1ea9c826ae43e9810adafe62e5809a 2024-11-07T17:15:34,327 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ed1ea9c826ae43e9810adafe62e5809a, entries=150, sequenceid=398, filesize=12.0 K 2024-11-07T17:15:34,330 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/4cc4bdbc000b45fbbf8f32d257ee2b02 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/4cc4bdbc000b45fbbf8f32d257ee2b02 2024-11-07T17:15:34,336 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/4cc4bdbc000b45fbbf8f32d257ee2b02, entries=150, sequenceid=398, filesize=12.0 K 2024-11-07T17:15:34,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-07T17:15:34,338 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-07T17:15:34,340 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:34,340 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 852ea2728c497a9e191625c6cb13c906 in 129ms, sequenceid=398, compaction requested=true 2024-11-07T17:15:34,340 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:34,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:A, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:34,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:34,340 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:34,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:B, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:34,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:34,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:34,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-07T17:15:34,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-07T17:15:34,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:34,342 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T17:15:34,341 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:34,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-07T17:15:34,343 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:34,343 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:34,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:34,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:34,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:34,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:34,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:34,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:34,345 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm 
has selected 3 files of size 42601 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:34,345 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/A is initiating minor compaction (all files) 2024-11-07T17:15:34,345 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/A in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:34,345 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c3fa624f740549cf8ad067a416f47da7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/11c10215d41f4cd48dfc4fc69219c01e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/64446b71e2944ab2982013a54b00372b] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=41.6 K 2024-11-07T17:15:34,346 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3fa624f740549cf8ad067a416f47da7, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1730999731874 2024-11-07T17:15:34,346 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11c10215d41f4cd48dfc4fc69219c01e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1730999733002 2024-11-07T17:15:34,347 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64446b71e2944ab2982013a54b00372b, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1730999734156 2024-11-07T17:15:34,367 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/ec787500da6141d5b8fed3409b465d1f is 50, key is test_row_0/A:col10/1730999734338/Put/seqid=0 2024-11-07T17:15:34,379 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#A#compaction#82 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:34,380 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/53157fe3274f445aa4a09f1d47ead136 is 50, key is test_row_0/A:col10/1730999734210/Put/seqid=0 2024-11-07T17:15:34,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741920_1096 (size=17181) 2024-11-07T17:15:34,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999794391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999794400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741921_1097 (size=13221) 2024-11-07T17:15:34,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999794402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999794402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999794403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,414 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/53157fe3274f445aa4a09f1d47ead136 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/53157fe3274f445aa4a09f1d47ead136 2024-11-07T17:15:34,421 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/A of 852ea2728c497a9e191625c6cb13c906 into 53157fe3274f445aa4a09f1d47ead136(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:34,421 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:34,421 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/A, priority=13, startTime=1730999734340; duration=0sec 2024-11-07T17:15:34,421 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-07T17:15:34,421 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:A 2024-11-07T17:15:34,421 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 3 compacting, 2 eligible, 16 blocking 2024-11-07T17:15:34,422 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-07T17:15:34,422 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 
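Note on the compaction selection logged above: once the A-family compaction commits, SortedCompactionPolicy/ExploringCompactionPolicy re-scan the remaining store files and report how many candidate permutations were "in ratio" (here 0, so no new A compaction; the B-family selection that follows does pass). As a rough, hedged illustration only (not the actual HBase source), the test each candidate set must pass is that no single file exceeds the configured ratio (hbase.hstore.compaction.ratio, 1.2 by default) times the combined size of the other files in the set:

```java
import java.util.List;

// Simplified sketch of the "in ratio" check applied to each candidate permutation;
// an illustration of the idea, not the HBase implementation.
final class CompactionRatioCheck {
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            // Each file must be no larger than ratio * (sum of the other files).
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the three ~12 K B-family files selected below (total 37721 bytes): passes.
        System.out.println(filesInRatio(List.of(13_104L, 12_289L, 12_328L), 1.2));
        // One oversized file would fail the check and be skipped by the policy.
        System.out.println(filesInRatio(List.of(12_000L, 12_000L, 200_000L), 1.2));
    }
}
```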
2024-11-07T17:15:34,422 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. because compaction request was cancelled 2024-11-07T17:15:34,422 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:C 2024-11-07T17:15:34,423 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:34,425 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:34,425 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/B is initiating minor compaction (all files) 2024-11-07T17:15:34,425 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/B in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:34,425 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/4405b68c67104189bea93839c28bd1bd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/82a334885ef14a159027299cbcf79e0e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ed1ea9c826ae43e9810adafe62e5809a] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=36.8 K 2024-11-07T17:15:34,426 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4405b68c67104189bea93839c28bd1bd, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1730999731874 2024-11-07T17:15:34,426 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82a334885ef14a159027299cbcf79e0e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1730999733002 2024-11-07T17:15:34,427 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed1ea9c826ae43e9810adafe62e5809a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1730999734168 2024-11-07T17:15:34,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-07T17:15:34,449 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#B#compaction#83 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:34,450 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/677b57d6777f47a281ebad1457c44c6b is 50, key is test_row_0/B:col10/1730999734210/Put/seqid=0 2024-11-07T17:15:34,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741922_1098 (size=13221) 2024-11-07T17:15:34,491 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/c6e85087fa094976a98cfa4af51c7a64 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/c6e85087fa094976a98cfa4af51c7a64 2024-11-07T17:15:34,495 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,496 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-07T17:15:34,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:34,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:34,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:34,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
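The ERROR above (pid=29) is the master's remote flush procedure colliding with a flush that memstore pressure already started on the region server: FlushRegionCallable logs "NOT flushing ... as already flushing", fails with IOException, and the master re-dispatches the callable later, which is why the identical failure repeats further down. The sketch below is a loose, purely illustrative version of that redispatch pattern; the interface, class name and timings are invented here, and HBase's real procedure-v2 framework handles this retry/reporting loop itself:

```java
import java.io.IOException;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch: keep re-sending a remote flush request until the region
// server stops reporting that a flush is already in progress.
public class FlushRedispatchSketch {
    interface RemoteFlushCall {
        /** Returns normally when the flush completed, throws IOException otherwise. */
        void run() throws IOException;
    }

    static void dispatchUntilDone(RemoteFlushCall call, int maxAttempts) throws Exception {
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                call.run();
                return;                                        // flush completed
            } catch (IOException alreadyFlushing) {
                TimeUnit.MILLISECONDS.sleep(150L * attempt);   // back off, then redispatch
            }
        }
        throw new IOException("flush did not complete after " + maxAttempts + " attempts");
    }
}
```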
2024-11-07T17:15:34,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:34,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:34,503 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/C of 852ea2728c497a9e191625c6cb13c906 into c6e85087fa094976a98cfa4af51c7a64(size=12.8 K), total size for store is 36.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:34,503 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:34,503 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/C, priority=13, startTime=1730999733962; duration=0sec 2024-11-07T17:15:34,503 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:34,503 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:C 2024-11-07T17:15:34,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999794506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999794506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999794508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999794511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999794511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-07T17:15:34,653 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,654 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-07T17:15:34,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:34,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:34,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:34,654 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:34,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:34,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:34,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999794714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999794715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999794716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999794717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:34,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999794718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,802 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/ec787500da6141d5b8fed3409b465d1f 2024-11-07T17:15:34,807 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-07T17:15:34,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:34,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:34,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:34,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:34,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:34,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:34,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/8efa26b2ec0b4457b309ed49585bba57 is 50, key is test_row_0/B:col10/1730999734338/Put/seqid=0 2024-11-07T17:15:34,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741923_1099 (size=12301) 2024-11-07T17:15:34,885 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/677b57d6777f47a281ebad1457c44c6b as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/677b57d6777f47a281ebad1457c44c6b 2024-11-07T17:15:34,896 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/B of 852ea2728c497a9e191625c6cb13c906 into 677b57d6777f47a281ebad1457c44c6b(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
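The recurring RegionTooBusyException ("Over memstore limit=512.0 K") in this log means the region's memstore has crossed its blocking threshold, which is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; writes are rejected in checkResources() until flushes such as the 22.36 KB one above bring the memstore back under that limit. Below is a minimal sketch of how a test configuration could arrive at the 512 K figure; the concrete values are an assumption for illustration, not read from the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: the region blocks new writes (RegionTooBusyException) once the
// memstore exceeds flush.size * block.multiplier. These values are assumed, not the
// ones TestAcidGuarantees actually uses.
public class MemstoreLimitExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // 128 K (default is 128 MB)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block at 4x the flush size
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Writes blocked above " + blockingLimit + " bytes"); // 524288 = 512.0 K
    }
}
```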
2024-11-07T17:15:34,896 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:34,896 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/B, priority=13, startTime=1730999734340; duration=0sec 2024-11-07T17:15:34,896 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:34,896 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:B 2024-11-07T17:15:34,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-07T17:15:34,963 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:34,964 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-07T17:15:34,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:34,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:34,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:34,965 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
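From the client's point of view, each RegionTooBusyException rejection (including the ones that continue below) is a retryable failure: the mutation is turned back before being applied, and the caller is expected to back off until the memstore drains or the operation deadline shown in the DEBUG lines expires. The stock HBase client normally performs these retries internally; the sketch below only makes the backoff explicit, and the table, family and column names mirror the test but are otherwise illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch of a writer that tolerates a temporarily blocked region by
// retrying with exponential backoff; purely illustrative.
public class BusyRegionWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);
                    break;                                      // write accepted
                } catch (RegionTooBusyException e) {
                    Thread.sleep(100L << Math.min(attempt, 5)); // back off while the memstore drains
                }
            }
        }
    }
}
```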
2024-11-07T17:15:34,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:34,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:35,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999795016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999795020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999795022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999795022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999795026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,117 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-07T17:15:35,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:35,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:35,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:35,119 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
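The repeated RegionTooBusyException entries above come from HRegion.checkResources() rejecting incoming mutations while the region's memstore is over its blocking limit (512.0 K here), and the flush procedure pid=29 keeps failing with "NOT flushing ... as already flushing" until the in-progress flush completes. On the client side the rejection surfaces as an IOException on put/mutate; whether the client library retries it automatically depends on the HBase version and retry settings, so the sketch below retries explicitly. It is only a minimal illustration against the table and column seen in this log (TestAcidGuarantees, family A, qualifier col10); the row value, retry count, and backoff are made up for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Family/qualifier mirror the A:col10 cells seen in the flush output above.
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            int attempts = 0;
            while (true) {
                try {
                    // May throw RegionTooBusyException while the memstore is over the blocking limit.
                    table.put(put);
                    break;
                } catch (RegionTooBusyException busy) {
                    // Back off and retry a few times; the region normally unblocks once the flush finishes.
                    if (++attempts >= 5) {
                        throw busy;
                    }
                    Thread.sleep(100L * attempts);
                }
            }
        }
    }
}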
2024-11-07T17:15:35,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:35,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:35,233 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/8efa26b2ec0b4457b309ed49585bba57 2024-11-07T17:15:35,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/0af7cf675388423191e480d80666d52b is 50, key is test_row_0/C:col10/1730999734338/Put/seqid=0 2024-11-07T17:15:35,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741924_1100 (size=12301) 2024-11-07T17:15:35,263 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/0af7cf675388423191e480d80666d52b 2024-11-07T17:15:35,270 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,271 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-07T17:15:35,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:35,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:35,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:35,271 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:35,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:35,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:35,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/ec787500da6141d5b8fed3409b465d1f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/ec787500da6141d5b8fed3409b465d1f 2024-11-07T17:15:35,288 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/ec787500da6141d5b8fed3409b465d1f, entries=250, sequenceid=411, filesize=16.8 K 2024-11-07T17:15:35,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/8efa26b2ec0b4457b309ed49585bba57 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/8efa26b2ec0b4457b309ed49585bba57 2024-11-07T17:15:35,300 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/8efa26b2ec0b4457b309ed49585bba57, entries=150, sequenceid=411, filesize=12.0 K 2024-11-07T17:15:35,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/0af7cf675388423191e480d80666d52b as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/0af7cf675388423191e480d80666d52b 2024-11-07T17:15:35,316 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/0af7cf675388423191e480d80666d52b, entries=150, sequenceid=411, filesize=12.0 K 2024-11-07T17:15:35,319 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 852ea2728c497a9e191625c6cb13c906 in 977ms, sequenceid=411, compaction requested=true 2024-11-07T17:15:35,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:35,319 DEBUG 
[RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-07T17:15:35,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:15:35,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:35,320 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-07T17:15:35,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:35,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:35,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:35,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:35,320 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-07T17:15:35,320 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-07T17:15:35,320 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. because compaction request was cancelled 2024-11-07T17:15:35,320 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:A 2024-11-07T17:15:35,320 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:35,321 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-07T17:15:35,321 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-07T17:15:35,321 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
because compaction request was cancelled 2024-11-07T17:15:35,321 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:B 2024-11-07T17:15:35,322 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:35,322 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/C is initiating minor compaction (all files) 2024-11-07T17:15:35,322 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/C in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:35,322 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/c6e85087fa094976a98cfa4af51c7a64, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e3358951dd2d4774a324af1a5e0d41f5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/4cc4bdbc000b45fbbf8f32d257ee2b02, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/0af7cf675388423191e480d80666d52b] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=48.8 K 2024-11-07T17:15:35,323 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6e85087fa094976a98cfa4af51c7a64, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1730999731874 2024-11-07T17:15:35,324 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3358951dd2d4774a324af1a5e0d41f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1730999733002 2024-11-07T17:15:35,326 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4cc4bdbc000b45fbbf8f32d257ee2b02, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1730999734168 2024-11-07T17:15:35,327 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0af7cf675388423191e480d80666d52b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1730999734221 2024-11-07T17:15:35,344 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#C#compaction#86 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:35,345 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b9c5cda93e004d07ac5fbd1d8388f3eb is 50, key is test_row_0/C:col10/1730999734338/Put/seqid=0 2024-11-07T17:15:35,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741925_1101 (size=13255) 2024-11-07T17:15:35,423 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-07T17:15:35,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:35,424 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T17:15:35,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:35,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:35,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:35,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:35,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:35,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:35,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/10cf8db124ed46588304ed100c7c88d3 is 50, key is test_row_0/A:col10/1730999734386/Put/seqid=0 2024-11-07T17:15:35,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741926_1102 (size=12301) 2024-11-07T17:15:35,447 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=46.96 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/10cf8db124ed46588304ed100c7c88d3 2024-11-07T17:15:35,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-07T17:15:35,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/e6fa8db01c9942acb840bf5e06e31b01 is 50, key is test_row_0/B:col10/1730999734386/Put/seqid=0 2024-11-07T17:15:35,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741927_1103 (size=12301) 2024-11-07T17:15:35,495 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/e6fa8db01c9942acb840bf5e06e31b01 2024-11-07T17:15:35,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/9da14904ada44068bb3cab8b18b53ba9 is 50, key is test_row_0/C:col10/1730999734386/Put/seqid=0 2024-11-07T17:15:35,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741928_1104 (size=12301) 2024-11-07T17:15:35,521 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/9da14904ada44068bb3cab8b18b53ba9 2024-11-07T17:15:35,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:35,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:35,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/10cf8db124ed46588304ed100c7c88d3 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/10cf8db124ed46588304ed100c7c88d3 2024-11-07T17:15:35,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999795535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999795533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999795536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999795540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,544 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/10cf8db124ed46588304ed100c7c88d3, entries=150, sequenceid=438, filesize=12.0 K 2024-11-07T17:15:35,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999795541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/e6fa8db01c9942acb840bf5e06e31b01 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/e6fa8db01c9942acb840bf5e06e31b01 2024-11-07T17:15:35,553 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/e6fa8db01c9942acb840bf5e06e31b01, entries=150, sequenceid=438, filesize=12.0 K 2024-11-07T17:15:35,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/9da14904ada44068bb3cab8b18b53ba9 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/9da14904ada44068bb3cab8b18b53ba9 2024-11-07T17:15:35,560 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/9da14904ada44068bb3cab8b18b53ba9, entries=150, sequenceid=438, filesize=12.0 K 2024-11-07T17:15:35,562 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 852ea2728c497a9e191625c6cb13c906 in 138ms, sequenceid=438, compaction requested=true 2024-11-07T17:15:35,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:35,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing 
region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:35,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-07T17:15:35,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-07T17:15:35,566 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-07T17:15:35,566 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2220 sec 2024-11-07T17:15:35,568 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.2270 sec 2024-11-07T17:15:35,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:35,648 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T17:15:35,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:35,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:35,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:35,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:35,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:35,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:35,671 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/2b1f7ed46cb44807a06975fa65776747 is 50, key is test_row_0/A:col10/1730999735647/Put/seqid=0 2024-11-07T17:15:35,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741929_1105 (size=12301) 2024-11-07T17:15:35,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999795713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999795713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999795713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999795714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999795714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,779 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b9c5cda93e004d07ac5fbd1d8388f3eb as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b9c5cda93e004d07ac5fbd1d8388f3eb 2024-11-07T17:15:35,789 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/C of 852ea2728c497a9e191625c6cb13c906 into b9c5cda93e004d07ac5fbd1d8388f3eb(size=12.9 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
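The repeated RegionTooBusyException entries above are the memstore back-pressure path: HRegion.checkResources() rejects each Mutate RPC while the region's memstore is over its blocking limit (512.0 K here), and ipc.CallRunner reports the exception back to the caller. The standard HBase client retries these failures itself with backoff, so an application normally only sees them once retries are exhausted; the snippet below is only a minimal, illustrative sketch of issuing one of the puts seen in this log and backing off explicitly on that exception. It assumes an hbase-client dependency and an hbase-site.xml on the classpath pointing at the cluster, reuses the table, row, family and qualifier names visible above, and uses made-up retry bounds.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some value"));
                long backoffMs = 100;                                   // illustrative backoff, not a library default
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);                                 // may fail while the region blocks writes
                        break;
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 10) {
                            throw e;                                    // give up after a bounded number of attempts
                        }
                        Thread.sleep(backoffMs);                        // wait for a flush to drain the memstore
                        backoffMs = Math.min(backoffMs * 2, 5_000);
                    }
                }
            }
        }
    }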
2024-11-07T17:15:35,789 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:35,789 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/C, priority=12, startTime=1730999735320; duration=0sec 2024-11-07T17:15:35,789 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:35,790 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:C 2024-11-07T17:15:35,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999795820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999795820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999795821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999795821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:35,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:35,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999795821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999796023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999796025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999796025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999796025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999796030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,087 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/2b1f7ed46cb44807a06975fa65776747 2024-11-07T17:15:36,098 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/278d855049274f2bb59cd7a7446573ac is 50, key is test_row_0/B:col10/1730999735647/Put/seqid=0 2024-11-07T17:15:36,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741930_1106 (size=12301) 2024-11-07T17:15:36,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/278d855049274f2bb59cd7a7446573ac 2024-11-07T17:15:36,123 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b0489813d1774583a1c16735919e8160 is 50, key is test_row_0/C:col10/1730999735647/Put/seqid=0 2024-11-07T17:15:36,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741931_1107 (size=12301) 2024-11-07T17:15:36,146 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b0489813d1774583a1c16735919e8160 2024-11-07T17:15:36,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/2b1f7ed46cb44807a06975fa65776747 as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/2b1f7ed46cb44807a06975fa65776747 2024-11-07T17:15:36,168 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/2b1f7ed46cb44807a06975fa65776747, entries=150, sequenceid=452, filesize=12.0 K 2024-11-07T17:15:36,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/278d855049274f2bb59cd7a7446573ac as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/278d855049274f2bb59cd7a7446573ac 2024-11-07T17:15:36,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/278d855049274f2bb59cd7a7446573ac, entries=150, sequenceid=452, filesize=12.0 K 2024-11-07T17:15:36,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b0489813d1774583a1c16735919e8160 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b0489813d1774583a1c16735919e8160 2024-11-07T17:15:36,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b0489813d1774583a1c16735919e8160, entries=150, sequenceid=452, filesize=12.0 K 2024-11-07T17:15:36,189 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 852ea2728c497a9e191625c6cb13c906 in 541ms, sequenceid=452, compaction requested=true 2024-11-07T17:15:36,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:36,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:15:36,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:36,189 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:36,189 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:36,190 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:B, priority=-2147483648, current under compaction 
store size is 2 2024-11-07T17:15:36,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:36,191 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:36,191 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50124 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:36,191 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/A is initiating minor compaction (all files) 2024-11-07T17:15:36,191 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/B is initiating minor compaction (all files) 2024-11-07T17:15:36,192 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/A in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:36,192 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/B in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:36,192 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/53157fe3274f445aa4a09f1d47ead136, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/ec787500da6141d5b8fed3409b465d1f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/10cf8db124ed46588304ed100c7c88d3, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/2b1f7ed46cb44807a06975fa65776747] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=53.7 K 2024-11-07T17:15:36,192 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/677b57d6777f47a281ebad1457c44c6b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/8efa26b2ec0b4457b309ed49585bba57, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/e6fa8db01c9942acb840bf5e06e31b01, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/278d855049274f2bb59cd7a7446573ac] into 
tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=48.9 K 2024-11-07T17:15:36,192 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53157fe3274f445aa4a09f1d47ead136, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1730999734168 2024-11-07T17:15:36,193 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec787500da6141d5b8fed3409b465d1f, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1730999734221 2024-11-07T17:15:36,193 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 677b57d6777f47a281ebad1457c44c6b, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1730999734168 2024-11-07T17:15:36,193 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10cf8db124ed46588304ed100c7c88d3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1730999734386 2024-11-07T17:15:36,194 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 8efa26b2ec0b4457b309ed49585bba57, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1730999734221 2024-11-07T17:15:36,194 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b1f7ed46cb44807a06975fa65776747, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1730999735533 2024-11-07T17:15:36,194 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting e6fa8db01c9942acb840bf5e06e31b01, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1730999734386 2024-11-07T17:15:36,195 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 278d855049274f2bb59cd7a7446573ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1730999735533 2024-11-07T17:15:36,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:36,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:36,209 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#B#compaction#93 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:36,210 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/6ed68a1f03194ee1baf3ebd4a2e62355 is 50, key is test_row_0/B:col10/1730999735647/Put/seqid=0 2024-11-07T17:15:36,210 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#A#compaction#94 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:36,211 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/f5e219b358e44f65952506c6d88e199f is 50, key is test_row_0/A:col10/1730999735647/Put/seqid=0 2024-11-07T17:15:36,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741932_1108 (size=13357) 2024-11-07T17:15:36,239 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/6ed68a1f03194ee1baf3ebd4a2e62355 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/6ed68a1f03194ee1baf3ebd4a2e62355 2024-11-07T17:15:36,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741933_1109 (size=13357) 2024-11-07T17:15:36,247 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/B of 852ea2728c497a9e191625c6cb13c906 into 6ed68a1f03194ee1baf3ebd4a2e62355(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
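The MemStoreFlusher and DefaultStoreFlusher entries above record a region flush writing the A, B and C store files under .tmp and committing them; further down, the master logs show the same operation being requested on demand as a FlushTableProcedure for TestAcidGuarantees. Purely as a hedged illustration, and under the same hbase-client assumptions as the previous sketch, such a table flush can be requested through the Admin API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table; the region server
                // then runs the same MemStoreFlusher path recorded in this log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }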
2024-11-07T17:15:36,247 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:36,248 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/B, priority=12, startTime=1730999736189; duration=0sec 2024-11-07T17:15:36,248 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:36,248 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:B 2024-11-07T17:15:36,248 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:36,250 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:36,250 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/C is initiating minor compaction (all files) 2024-11-07T17:15:36,250 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/C in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:36,250 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b9c5cda93e004d07ac5fbd1d8388f3eb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/9da14904ada44068bb3cab8b18b53ba9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b0489813d1774583a1c16735919e8160] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=37.0 K 2024-11-07T17:15:36,251 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/f5e219b358e44f65952506c6d88e199f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/f5e219b358e44f65952506c6d88e199f 2024-11-07T17:15:36,251 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting b9c5cda93e004d07ac5fbd1d8388f3eb, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1730999734221 2024-11-07T17:15:36,252 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 9da14904ada44068bb3cab8b18b53ba9, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1730999734386 2024-11-07T17:15:36,252 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting b0489813d1774583a1c16735919e8160, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1730999735533 2024-11-07T17:15:36,257 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/A of 852ea2728c497a9e191625c6cb13c906 into f5e219b358e44f65952506c6d88e199f(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:36,258 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:36,258 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/A, priority=12, startTime=1730999736189; duration=0sec 2024-11-07T17:15:36,258 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:36,258 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:A 2024-11-07T17:15:36,274 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#C#compaction#95 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:36,275 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/a4da75b3807546b28d91def410867520 is 50, key is test_row_0/C:col10/1730999735647/Put/seqid=0 2024-11-07T17:15:36,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741934_1110 (size=13357) 2024-11-07T17:15:36,291 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/a4da75b3807546b28d91def410867520 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/a4da75b3807546b28d91def410867520 2024-11-07T17:15:36,298 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/C of 852ea2728c497a9e191625c6cb13c906 into a4da75b3807546b28d91def410867520(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
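All of the "Over memstore limit=512.0 K" rejections in this section come from the per-region blocking threshold, which is the configured memstore flush size multiplied by the block multiplier (hbase.hregion.memstore.flush.size × hbase.hregion.memstore.block.multiplier). A 512 K limit implies the test runs with a deliberately tiny flush size so that blocking and flushing are exercised constantly. The exact values used by this test are not shown in this excerpt, so the sketch below uses an assumed combination that would reproduce that limit (128 KB times the default multiplier of 4):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed, illustrative values: 128 KB flush size * multiplier 4 = 512 K blocking limit,
            // matching the limit reported in the RegionTooBusyException messages above.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("flush.size=" + conf.getLong("hbase.hregion.memstore.flush.size", -1)
                    + " block.multiplier=" + conf.getInt("hbase.hregion.memstore.block.multiplier", -1));
        }
    }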
2024-11-07T17:15:36,298 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:36,298 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/C, priority=13, startTime=1730999736196; duration=0sec 2024-11-07T17:15:36,298 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:36,299 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:C 2024-11-07T17:15:36,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:36,330 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T17:15:36,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:36,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:36,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:36,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:36,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:36,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:36,342 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/adccef0791e74db0ad342cc15d883920 is 50, key is test_row_0/A:col10/1730999736329/Put/seqid=0 2024-11-07T17:15:36,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741935_1111 (size=12301) 2024-11-07T17:15:36,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999796343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999796384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999796384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999796385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999796385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-07T17:15:36,449 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-07T17:15:36,452 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:36,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-07T17:15:36,454 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:36,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-07T17:15:36,457 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:36,457 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:36,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999796486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999796489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999796490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999796490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999796490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-07T17:15:36,613 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-07T17:15:36,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:36,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:36,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:36,614 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:36,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
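The repeated org.apache.hadoop.hbase.RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources: once the region's memstore grows past its blocking size, further puts are rejected until the flush already in flight drains it. The blocking size is derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; this test run caps it at 512 K, which is why the concurrent writers hit the limit almost immediately. The Java sketch below is illustrative only: the values and the explicit retry loop are assumptions, not taken from the test source, and the stock HBase client normally retries RegionTooBusyException on its own.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressureExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Server-side settings (normally in hbase-site.xml), shown here only to name them:
    // the blocking threshold is flush.size * block.multiplier. Once a region's memstore
    // exceeds it, writes fail fast with RegionTooBusyException until a flush completes.
    // Illustrative values, not the 512 K limit this test configures.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;                          // write accepted
        } catch (RegionTooBusyException e) {
          Thread.sleep(200L << attempt);  // back off while the region flushes, then retry
        }
      }
    }
  }
}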
2024-11-07T17:15:36,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:36,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999796689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999796693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999796693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999796694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999796694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-07T17:15:36,759 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/adccef0791e74db0ad342cc15d883920 2024-11-07T17:15:36,767 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-07T17:15:36,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:36,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:36,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:36,768 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:36,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:36,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:36,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/20069430f4af49a8877e13f24c5026fe is 50, key is test_row_0/B:col10/1730999736329/Put/seqid=0 2024-11-07T17:15:36,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741936_1112 (size=12301) 2024-11-07T17:15:36,793 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/20069430f4af49a8877e13f24c5026fe 2024-11-07T17:15:36,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/0cfa2926af6348c581f79bbbfba925ed is 50, key is test_row_0/C:col10/1730999736329/Put/seqid=0 2024-11-07T17:15:36,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741937_1113 (size=12301) 2024-11-07T17:15:36,920 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,921 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-07T17:15:36,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:36,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:36,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:36,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:36,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
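The loop above, where the master stores FlushTableProcedure pid=30, dispatches FlushRegionProcedure pid=31, and the region server's FlushRegionCallable answers "Unable to complete flush ... as already flushing", is the retry path for a flush requested while another flush is in progress: the remote procedure is re-dispatched until the flush that MemStoreFlusher.0 already has in flight completes. A minimal sketch of the client call that produces these entries follows; it is an assumption rather than an excerpt from the test source, although the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" line shows the test driving the same admin API.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a FlushTableProcedure on the master and waits for it to be
      // reported done, which is what the "Checking to see if procedure is done
      // pid=30" polling above corresponds to.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}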
2024-11-07T17:15:36,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:36,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999796991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999796996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999796996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999796997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:36,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:36,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999796997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-07T17:15:37,073 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-07T17:15:37,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:37,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:37,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:37,074 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:37,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:37,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:37,212 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/0cfa2926af6348c581f79bbbfba925ed 2024-11-07T17:15:37,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/adccef0791e74db0ad342cc15d883920 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/adccef0791e74db0ad342cc15d883920 2024-11-07T17:15:37,225 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/adccef0791e74db0ad342cc15d883920, entries=150, sequenceid=480, filesize=12.0 K 2024-11-07T17:15:37,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/20069430f4af49a8877e13f24c5026fe as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/20069430f4af49a8877e13f24c5026fe 2024-11-07T17:15:37,227 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,228 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-07T17:15:37,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:37,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:37,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:37,228 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:37,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:37,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:37,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/20069430f4af49a8877e13f24c5026fe, entries=150, sequenceid=480, filesize=12.0 K 2024-11-07T17:15:37,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/0cfa2926af6348c581f79bbbfba925ed as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/0cfa2926af6348c581f79bbbfba925ed 2024-11-07T17:15:37,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/0cfa2926af6348c581f79bbbfba925ed, entries=150, sequenceid=480, filesize=12.0 K 2024-11-07T17:15:37,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=73.80 KB/75570 for 852ea2728c497a9e191625c6cb13c906 in 914ms, sequenceid=480, compaction requested=false 2024-11-07T17:15:37,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:37,380 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-07T17:15:37,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:37,381 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T17:15:37,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:37,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:37,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:37,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:37,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:37,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:37,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/a6fd25756e324a32ac6640f696b98c59 is 50, key is test_row_0/A:col10/1730999736384/Put/seqid=0 2024-11-07T17:15:37,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741938_1114 (size=12301) 2024-11-07T17:15:37,394 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/a6fd25756e324a32ac6640f696b98c59 2024-11-07T17:15:37,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/f837cee91c774e338a3982850aa1ba3d is 50, key is test_row_0/B:col10/1730999736384/Put/seqid=0 2024-11-07T17:15:37,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741939_1115 (size=12301) 2024-11-07T17:15:37,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:37,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
as already flushing 2024-11-07T17:15:37,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:37,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999797515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:37,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999797516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:37,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999797518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:37,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:37,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999797519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999797520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-07T17:15:37,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:37,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999797621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:37,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999797622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:37,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999797622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:37,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:37,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999797622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999797622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,810 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/f837cee91c774e338a3982850aa1ba3d 2024-11-07T17:15:37,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/8b4cbcb54b3a491ca09652ad73168bc8 is 50, key is test_row_0/C:col10/1730999736384/Put/seqid=0 2024-11-07T17:15:37,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741940_1116 (size=12301) 2024-11-07T17:15:37,827 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/8b4cbcb54b3a491ca09652ad73168bc8 2024-11-07T17:15:37,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999797826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999797826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999797826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999797827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999797828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:37,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/a6fd25756e324a32ac6640f696b98c59 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/a6fd25756e324a32ac6640f696b98c59 2024-11-07T17:15:37,838 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/a6fd25756e324a32ac6640f696b98c59, entries=150, sequenceid=494, filesize=12.0 K 2024-11-07T17:15:37,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/f837cee91c774e338a3982850aa1ba3d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/f837cee91c774e338a3982850aa1ba3d 2024-11-07T17:15:37,845 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/f837cee91c774e338a3982850aa1ba3d, entries=150, sequenceid=494, filesize=12.0 K 2024-11-07T17:15:37,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/8b4cbcb54b3a491ca09652ad73168bc8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/8b4cbcb54b3a491ca09652ad73168bc8 2024-11-07T17:15:37,853 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/8b4cbcb54b3a491ca09652ad73168bc8, entries=150, sequenceid=494, filesize=12.0 K 2024-11-07T17:15:37,853 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 852ea2728c497a9e191625c6cb13c906 in 472ms, sequenceid=494, compaction requested=true 2024-11-07T17:15:37,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:37,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:37,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-07T17:15:37,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-07T17:15:37,857 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-07T17:15:37,857 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3980 sec 2024-11-07T17:15:37,859 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 1.4060 sec 2024-11-07T17:15:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:38,132 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T17:15:38,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:38,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:38,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:38,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:38,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:38,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:38,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/8025a753b32e4b238e5ae5a5b5b6dbaa is 50, key is test_row_0/A:col10/1730999737518/Put/seqid=0 2024-11-07T17:15:38,152 
WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999798146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999798147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999798147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999798152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999798152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741941_1117 (size=12301) 2024-11-07T17:15:38,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999798254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999798255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999798255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999798255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999798255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999798457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999798458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999798458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999798459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999798460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-07T17:15:38,559 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-07T17:15:38,561 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:38,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-11-07T17:15:38,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-07T17:15:38,563 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:38,563 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:38,563 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:38,565 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=518 
(bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/8025a753b32e4b238e5ae5a5b5b6dbaa 2024-11-07T17:15:38,576 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/0f5c35f268ed4c5ea14c4edb1a48063c is 50, key is test_row_0/B:col10/1730999737518/Put/seqid=0 2024-11-07T17:15:38,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741942_1118 (size=12301) 2024-11-07T17:15:38,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=518 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/0f5c35f268ed4c5ea14c4edb1a48063c 2024-11-07T17:15:38,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/e8bfc01b24884b2188cec2cd610613aa is 50, key is test_row_0/C:col10/1730999737518/Put/seqid=0 2024-11-07T17:15:38,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741943_1119 (size=12301) 2024-11-07T17:15:38,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-07T17:15:38,717 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,717 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-07T17:15:38,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:38,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:38,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:38,718 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:38,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:38,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:38,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999798762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999798762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999798762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999798763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:38,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999798764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-07T17:15:38,870 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:38,870 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-07T17:15:38,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:38,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:38,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:38,871 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:38,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:38,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:39,004 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=518 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/e8bfc01b24884b2188cec2cd610613aa 2024-11-07T17:15:39,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/8025a753b32e4b238e5ae5a5b5b6dbaa as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8025a753b32e4b238e5ae5a5b5b6dbaa 2024-11-07T17:15:39,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8025a753b32e4b238e5ae5a5b5b6dbaa, entries=150, sequenceid=518, filesize=12.0 K 2024-11-07T17:15:39,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/0f5c35f268ed4c5ea14c4edb1a48063c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/0f5c35f268ed4c5ea14c4edb1a48063c 2024-11-07T17:15:39,023 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,024 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-07T17:15:39,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:39,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:39,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:39,025 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:39,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:39,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:39,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/0f5c35f268ed4c5ea14c4edb1a48063c, entries=150, sequenceid=518, filesize=12.0 K 2024-11-07T17:15:39,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/e8bfc01b24884b2188cec2cd610613aa as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e8bfc01b24884b2188cec2cd610613aa 2024-11-07T17:15:39,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e8bfc01b24884b2188cec2cd610613aa, entries=150, sequenceid=518, filesize=12.0 K 2024-11-07T17:15:39,045 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 852ea2728c497a9e191625c6cb13c906 in 913ms, sequenceid=518, compaction requested=true 2024-11-07T17:15:39,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:39,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:15:39,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:39,045 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:39,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:39,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:39,045 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:39,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:39,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:39,047 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:39,047 DEBUG 
[RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/B is initiating minor compaction (all files) 2024-11-07T17:15:39,047 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/B in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:39,047 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:39,047 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/A is initiating minor compaction (all files) 2024-11-07T17:15:39,047 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/6ed68a1f03194ee1baf3ebd4a2e62355, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/20069430f4af49a8877e13f24c5026fe, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/f837cee91c774e338a3982850aa1ba3d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/0f5c35f268ed4c5ea14c4edb1a48063c] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=49.1 K 2024-11-07T17:15:39,047 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/A in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:39,047 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/f5e219b358e44f65952506c6d88e199f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/adccef0791e74db0ad342cc15d883920, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/a6fd25756e324a32ac6640f696b98c59, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8025a753b32e4b238e5ae5a5b5b6dbaa] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=49.1 K 2024-11-07T17:15:39,052 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ed68a1f03194ee1baf3ebd4a2e62355, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1730999735533 2024-11-07T17:15:39,053 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5e219b358e44f65952506c6d88e199f, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1730999735533 2024-11-07T17:15:39,053 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 20069430f4af49a8877e13f24c5026fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1730999735671 2024-11-07T17:15:39,053 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting adccef0791e74db0ad342cc15d883920, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1730999735671 2024-11-07T17:15:39,054 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting f837cee91c774e338a3982850aa1ba3d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1730999736342 2024-11-07T17:15:39,054 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6fd25756e324a32ac6640f696b98c59, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1730999736342 2024-11-07T17:15:39,055 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8025a753b32e4b238e5ae5a5b5b6dbaa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=518, earliestPutTs=1730999737518 2024-11-07T17:15:39,055 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f5c35f268ed4c5ea14c4edb1a48063c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=518, earliestPutTs=1730999737518 2024-11-07T17:15:39,074 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#B#compaction#105 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:39,075 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/64f51bd8989345588b735bc9cd89c77d is 50, key is test_row_0/B:col10/1730999737518/Put/seqid=0 2024-11-07T17:15:39,077 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#A#compaction#106 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:39,078 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/d1a38a40494b41d784aaa98c40f840e7 is 50, key is test_row_0/A:col10/1730999737518/Put/seqid=0 2024-11-07T17:15:39,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741944_1120 (size=13493) 2024-11-07T17:15:39,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741945_1121 (size=13493) 2024-11-07T17:15:39,100 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/d1a38a40494b41d784aaa98c40f840e7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/d1a38a40494b41d784aaa98c40f840e7 2024-11-07T17:15:39,109 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/A of 852ea2728c497a9e191625c6cb13c906 into d1a38a40494b41d784aaa98c40f840e7(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:39,109 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:39,109 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/A, priority=12, startTime=1730999739045; duration=0sec 2024-11-07T17:15:39,109 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:39,109 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:A 2024-11-07T17:15:39,109 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:39,112 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:39,112 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/C is initiating minor compaction (all files) 2024-11-07T17:15:39,112 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/C in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:39,112 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/a4da75b3807546b28d91def410867520, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/0cfa2926af6348c581f79bbbfba925ed, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/8b4cbcb54b3a491ca09652ad73168bc8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e8bfc01b24884b2188cec2cd610613aa] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=49.1 K 2024-11-07T17:15:39,113 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4da75b3807546b28d91def410867520, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1730999735533 2024-11-07T17:15:39,113 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0cfa2926af6348c581f79bbbfba925ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1730999735671 2024-11-07T17:15:39,114 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b4cbcb54b3a491ca09652ad73168bc8, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1730999736342 2024-11-07T17:15:39,115 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8bfc01b24884b2188cec2cd610613aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=518, earliestPutTs=1730999737518 2024-11-07T17:15:39,132 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#C#compaction#107 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:39,133 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/3a9b032d8c444268ac08c558ea31d63d is 50, key is test_row_0/C:col10/1730999737518/Put/seqid=0 2024-11-07T17:15:39,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741946_1122 (size=13493) 2024-11-07T17:15:39,159 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/3a9b032d8c444268ac08c558ea31d63d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/3a9b032d8c444268ac08c558ea31d63d 2024-11-07T17:15:39,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-07T17:15:39,175 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/C of 852ea2728c497a9e191625c6cb13c906 into 3a9b032d8c444268ac08c558ea31d63d(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:39,175 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:39,175 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/C, priority=12, startTime=1730999739045; duration=0sec 2024-11-07T17:15:39,176 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:39,176 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:C 2024-11-07T17:15:39,177 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-07T17:15:39,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:39,179 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T17:15:39,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:39,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:39,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:39,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:39,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:39,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:39,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/7fe4ef182d2a474cb6812a6d43a68de5 is 50, key is test_row_0/A:col10/1730999738147/Put/seqid=0 2024-11-07T17:15:39,207 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741947_1123 (size=12301) 2024-11-07T17:15:39,208 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/7fe4ef182d2a474cb6812a6d43a68de5 2024-11-07T17:15:39,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/b37904531f7e449790ecbdb08934254d is 50, key is test_row_0/B:col10/1730999738147/Put/seqid=0 2024-11-07T17:15:39,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:39,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. as already flushing 2024-11-07T17:15:39,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741948_1124 (size=12301) 2024-11-07T17:15:39,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999799307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999799309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999799312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999799316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999799316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999799418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999799420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999799423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999799450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999799453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,494 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/64f51bd8989345588b735bc9cd89c77d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/64f51bd8989345588b735bc9cd89c77d 2024-11-07T17:15:39,503 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/B of 852ea2728c497a9e191625c6cb13c906 into 64f51bd8989345588b735bc9cd89c77d(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:39,503 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:39,503 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/B, priority=12, startTime=1730999739045; duration=0sec 2024-11-07T17:15:39,504 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:39,504 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:B 2024-11-07T17:15:39,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999799623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999799623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,629 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999799626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999799660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999799660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-07T17:15:39,678 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/b37904531f7e449790ecbdb08934254d 2024-11-07T17:15:39,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/5dd2d4422e684b96b8419bcb979a49b9 is 50, key is test_row_0/C:col10/1730999738147/Put/seqid=0 2024-11-07T17:15:39,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741949_1125 (size=12301) 2024-11-07T17:15:39,731 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/5dd2d4422e684b96b8419bcb979a49b9 2024-11-07T17:15:39,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/7fe4ef182d2a474cb6812a6d43a68de5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/7fe4ef182d2a474cb6812a6d43a68de5 2024-11-07T17:15:39,749 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/7fe4ef182d2a474cb6812a6d43a68de5, entries=150, sequenceid=532, filesize=12.0 K 2024-11-07T17:15:39,751 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/b37904531f7e449790ecbdb08934254d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/b37904531f7e449790ecbdb08934254d 2024-11-07T17:15:39,757 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/b37904531f7e449790ecbdb08934254d, entries=150, sequenceid=532, filesize=12.0 K 2024-11-07T17:15:39,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/5dd2d4422e684b96b8419bcb979a49b9 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/5dd2d4422e684b96b8419bcb979a49b9 2024-11-07T17:15:39,766 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/5dd2d4422e684b96b8419bcb979a49b9, entries=150, sequenceid=532, filesize=12.0 K 2024-11-07T17:15:39,767 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 852ea2728c497a9e191625c6cb13c906 in 589ms, sequenceid=532, compaction requested=false 2024-11-07T17:15:39,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:39,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:39,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-11-07T17:15:39,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-11-07T17:15:39,770 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-07T17:15:39,771 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2060 sec 2024-11-07T17:15:39,774 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 1.2110 sec 2024-11-07T17:15:39,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:39,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-07T17:15:39,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:39,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:39,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:39,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:39,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:39,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:39,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/72744eae30a94cec8a099e462ef05b90 is 50, key is test_row_0/A:col10/1730999739928/Put/seqid=0 2024-11-07T17:15:39,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999799938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999799939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999799941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741950_1126 (size=14741) 2024-11-07T17:15:39,945 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=558 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/72744eae30a94cec8a099e462ef05b90 2024-11-07T17:15:39,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/913b62f283384db39092c8056b5e5028 is 50, key is test_row_0/B:col10/1730999739928/Put/seqid=0 2024-11-07T17:15:39,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39854 deadline: 1730999799963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:39,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39826 deadline: 1730999799963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:39,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741951_1127 (size=12301) 2024-11-07T17:15:40,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:40,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:40,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999800042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:40,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999800042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:40,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:40,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999800045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:40,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:40,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39870 deadline: 1730999800244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:40,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:40,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1730999800244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:40,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:40,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39838 deadline: 1730999800252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:40,369 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=558 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/913b62f283384db39092c8056b5e5028 2024-11-07T17:15:40,389 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/d6faacd55f4d43c8a8f35fc5b5adc165 is 50, key is test_row_0/C:col10/1730999739928/Put/seqid=0 2024-11-07T17:15:40,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741952_1128 (size=12301) 2024-11-07T17:15:40,412 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=558 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/d6faacd55f4d43c8a8f35fc5b5adc165 2024-11-07T17:15:40,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/72744eae30a94cec8a099e462ef05b90 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/72744eae30a94cec8a099e462ef05b90 2024-11-07T17:15:40,426 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/72744eae30a94cec8a099e462ef05b90, entries=200, sequenceid=558, filesize=14.4 K 2024-11-07T17:15:40,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/913b62f283384db39092c8056b5e5028 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/913b62f283384db39092c8056b5e5028 2024-11-07T17:15:40,432 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/913b62f283384db39092c8056b5e5028, entries=150, sequenceid=558, filesize=12.0 K 2024-11-07T17:15:40,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/d6faacd55f4d43c8a8f35fc5b5adc165 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/d6faacd55f4d43c8a8f35fc5b5adc165 2024-11-07T17:15:40,440 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/d6faacd55f4d43c8a8f35fc5b5adc165, entries=150, sequenceid=558, filesize=12.0 K 2024-11-07T17:15:40,441 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 852ea2728c497a9e191625c6cb13c906 in 512ms, sequenceid=558, compaction requested=true 2024-11-07T17:15:40,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:40,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:15:40,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:40,441 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:40,441 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:40,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:40,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:40,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 852ea2728c497a9e191625c6cb13c906:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:40,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:40,443 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:40,443 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/B is initiating minor compaction (all files) 2024-11-07T17:15:40,443 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/B in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:40,444 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/64f51bd8989345588b735bc9cd89c77d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/b37904531f7e449790ecbdb08934254d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/913b62f283384db39092c8056b5e5028] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=37.2 K 2024-11-07T17:15:40,444 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40535 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:40,444 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/A is initiating minor compaction (all files) 2024-11-07T17:15:40,444 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/A in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:40,444 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/d1a38a40494b41d784aaa98c40f840e7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/7fe4ef182d2a474cb6812a6d43a68de5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/72744eae30a94cec8a099e462ef05b90] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=39.6 K 2024-11-07T17:15:40,444 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 64f51bd8989345588b735bc9cd89c77d, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=518, earliestPutTs=1730999737518 2024-11-07T17:15:40,445 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1a38a40494b41d784aaa98c40f840e7, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=518, earliestPutTs=1730999737518 2024-11-07T17:15:40,445 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting b37904531f7e449790ecbdb08934254d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=532, earliestPutTs=1730999738142 2024-11-07T17:15:40,445 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fe4ef182d2a474cb6812a6d43a68de5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=532, earliestPutTs=1730999738142 2024-11-07T17:15:40,446 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 913b62f283384db39092c8056b5e5028, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=558, earliestPutTs=1730999739304 2024-11-07T17:15:40,446 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72744eae30a94cec8a099e462ef05b90, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=558, earliestPutTs=1730999739304 2024-11-07T17:15:40,458 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#A#compaction#114 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:40,459 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/72e877bc577647d2b0d4025c3da8cced is 50, key is test_row_0/A:col10/1730999739928/Put/seqid=0 2024-11-07T17:15:40,459 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x490457fd to 127.0.0.1:64938 2024-11-07T17:15:40,459 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:15:40,461 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72e97e4b to 127.0.0.1:64938 2024-11-07T17:15:40,461 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:15:40,461 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c8de680 to 127.0.0.1:64938 2024-11-07T17:15:40,461 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:15:40,463 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f6b07e3 to 127.0.0.1:64938 2024-11-07T17:15:40,463 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:15:40,466 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#B#compaction#115 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:40,467 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/8ca7903c8d4d48e9a52b0c9dbd31889b is 50, key is test_row_0/B:col10/1730999739928/Put/seqid=0 2024-11-07T17:15:40,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741953_1129 (size=13595) 2024-11-07T17:15:40,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:40,471 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T17:15:40,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:40,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:40,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:40,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:40,472 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18603bb9 to 127.0.0.1:64938 2024-11-07T17:15:40,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:40,472 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc 
client 2024-11-07T17:15:40,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:40,476 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04977266 to 127.0.0.1:64938 2024-11-07T17:15:40,476 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:15:40,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741954_1130 (size=13595) 2024-11-07T17:15:40,485 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/72e877bc577647d2b0d4025c3da8cced as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/72e877bc577647d2b0d4025c3da8cced 2024-11-07T17:15:40,486 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/8e1a9ca0fad54d4682e2849b8bcd7dd8 is 50, key is test_row_0/A:col10/1730999740470/Put/seqid=0 2024-11-07T17:15:40,492 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/A of 852ea2728c497a9e191625c6cb13c906 into 72e877bc577647d2b0d4025c3da8cced(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:40,492 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:40,492 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/8ca7903c8d4d48e9a52b0c9dbd31889b as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/8ca7903c8d4d48e9a52b0c9dbd31889b 2024-11-07T17:15:40,492 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/A, priority=13, startTime=1730999740441; duration=0sec 2024-11-07T17:15:40,492 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:40,493 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:A 2024-11-07T17:15:40,493 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:40,494 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:40,494 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 852ea2728c497a9e191625c6cb13c906/C is initiating minor compaction (all files) 2024-11-07T17:15:40,494 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 852ea2728c497a9e191625c6cb13c906/C in TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:40,495 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/3a9b032d8c444268ac08c558ea31d63d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/5dd2d4422e684b96b8419bcb979a49b9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/d6faacd55f4d43c8a8f35fc5b5adc165] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp, totalSize=37.2 K 2024-11-07T17:15:40,495 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a9b032d8c444268ac08c558ea31d63d, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=518, earliestPutTs=1730999737518 2024-11-07T17:15:40,496 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5dd2d4422e684b96b8419bcb979a49b9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=532, earliestPutTs=1730999738142 2024-11-07T17:15:40,496 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6faacd55f4d43c8a8f35fc5b5adc165, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=558, earliestPutTs=1730999739304 2024-11-07T17:15:40,501 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/B of 852ea2728c497a9e191625c6cb13c906 into 8ca7903c8d4d48e9a52b0c9dbd31889b(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:40,501 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:40,501 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/B, priority=13, startTime=1730999740441; duration=0sec 2024-11-07T17:15:40,501 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:40,501 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:B 2024-11-07T17:15:40,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741955_1131 (size=12301) 2024-11-07T17:15:40,507 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 852ea2728c497a9e191625c6cb13c906#C#compaction#117 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:40,508 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b15f8a3b8dbf453eb0f7fb2b9aa92bf3 is 50, key is test_row_0/C:col10/1730999739928/Put/seqid=0 2024-11-07T17:15:40,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741956_1132 (size=13595) 2024-11-07T17:15:40,551 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72b32f98 to 127.0.0.1:64938 2024-11-07T17:15:40,551 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12885408 to 127.0.0.1:64938 2024-11-07T17:15:40,551 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:15:40,551 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:15:40,557 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bbb5d8a to 127.0.0.1:64938 2024-11-07T17:15:40,557 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:15:40,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-07T17:15:40,668 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-07T17:15:40,669 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-07T17:15:40,669 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 93 2024-11-07T17:15:40,669 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 89 2024-11-07T17:15:40,669 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 90 2024-11-07T17:15:40,669 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 94 2024-11-07T17:15:40,669 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 90 2024-11-07T17:15:40,669 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-07T17:15:40,669 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5368 2024-11-07T17:15:40,669 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5409 2024-11-07T17:15:40,669 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-07T17:15:40,669 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2431 2024-11-07T17:15:40,669 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7292 rows 2024-11-07T17:15:40,669 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2424 2024-11-07T17:15:40,669 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7271 rows 2024-11-07T17:15:40,669 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-07T17:15:40,669 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e98ea32 to 127.0.0.1:64938 2024-11-07T17:15:40,669 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:15:40,673 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-07T17:15:40,680 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-07T17:15:40,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-07T17:15:40,689 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999740689"}]},"ts":"1730999740689"} 2024-11-07T17:15:40,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-07T17:15:40,691 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-07T17:15:40,693 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-07T17:15:40,694 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T17:15:40,699 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=852ea2728c497a9e191625c6cb13c906, UNASSIGN}] 2024-11-07T17:15:40,699 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=852ea2728c497a9e191625c6cb13c906, UNASSIGN 2024-11-07T17:15:40,700 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=852ea2728c497a9e191625c6cb13c906, regionState=CLOSING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:40,701 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T17:15:40,702 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; CloseRegionProcedure 852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:15:40,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-07T17:15:40,857 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:40,859 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(124): Close 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:40,859 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T17:15:40,860 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1681): Closing 852ea2728c497a9e191625c6cb13c906, disabling compactions & flushes 2024-11-07T17:15:40,860 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1942): waiting for 1 compactions & cache flush to 
complete for region TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:40,904 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=571 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/8e1a9ca0fad54d4682e2849b8bcd7dd8 2024-11-07T17:15:40,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/c4fcdd839b674aab80017b94a589730e is 50, key is test_row_0/B:col10/1730999740470/Put/seqid=0 2024-11-07T17:15:40,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741957_1133 (size=12301) 2024-11-07T17:15:40,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=571 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/c4fcdd839b674aab80017b94a589730e 2024-11-07T17:15:40,921 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/b15f8a3b8dbf453eb0f7fb2b9aa92bf3 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b15f8a3b8dbf453eb0f7fb2b9aa92bf3 2024-11-07T17:15:40,925 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/346eb86d1a364bc387538e2e30b5acfb is 50, key is test_row_0/C:col10/1730999740470/Put/seqid=0 2024-11-07T17:15:40,927 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 852ea2728c497a9e191625c6cb13c906/C of 852ea2728c497a9e191625c6cb13c906 into b15f8a3b8dbf453eb0f7fb2b9aa92bf3(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:40,927 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:40,927 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906., storeName=852ea2728c497a9e191625c6cb13c906/C, priority=13, startTime=1730999740442; duration=0sec 2024-11-07T17:15:40,927 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:40,927 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:40,927 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 852ea2728c497a9e191625c6cb13c906:C 2024-11-07T17:15:40,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741958_1134 (size=12301) 2024-11-07T17:15:40,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-07T17:15:41,141 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T17:15:41,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-07T17:15:41,330 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=571 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/346eb86d1a364bc387538e2e30b5acfb 2024-11-07T17:15:41,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/8e1a9ca0fad54d4682e2849b8bcd7dd8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8e1a9ca0fad54d4682e2849b8bcd7dd8 2024-11-07T17:15:41,341 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8e1a9ca0fad54d4682e2849b8bcd7dd8, entries=150, sequenceid=571, filesize=12.0 K 2024-11-07T17:15:41,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/c4fcdd839b674aab80017b94a589730e as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/c4fcdd839b674aab80017b94a589730e 2024-11-07T17:15:41,345 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/c4fcdd839b674aab80017b94a589730e, entries=150, sequenceid=571, filesize=12.0 K 2024-11-07T17:15:41,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/346eb86d1a364bc387538e2e30b5acfb as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/346eb86d1a364bc387538e2e30b5acfb 2024-11-07T17:15:41,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/346eb86d1a364bc387538e2e30b5acfb, entries=150, sequenceid=571, filesize=12.0 K 2024-11-07T17:15:41,349 ERROR [LeaseRenewer:jenkins@localhost:39903 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:39903,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:41,350 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=20.13 KB/20610 for 852ea2728c497a9e191625c6cb13c906 in 879ms, sequenceid=571, compaction requested=false 2024-11-07T17:15:41,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:41,350 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:41,350 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:41,350 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. after waiting 0 ms 2024-11-07T17:15:41,350 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 
2024-11-07T17:15:41,350 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(2837): Flushing 852ea2728c497a9e191625c6cb13c906 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-07T17:15:41,351 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=A 2024-11-07T17:15:41,351 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:41,351 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=B 2024-11-07T17:15:41,351 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:41,351 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 852ea2728c497a9e191625c6cb13c906, store=C 2024-11-07T17:15:41,351 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:41,355 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/14eda177307045c28b4cb771bc4239b3 is 50, key is test_row_0/A:col10/1730999740556/Put/seqid=0 2024-11-07T17:15:41,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741959_1135 (size=12301) 2024-11-07T17:15:41,762 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=580 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/14eda177307045c28b4cb771bc4239b3 2024-11-07T17:15:41,772 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/19d60cab09b84640a603bac99f57fa63 is 50, key is test_row_0/B:col10/1730999740556/Put/seqid=0 2024-11-07T17:15:41,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741960_1136 (size=12301) 2024-11-07T17:15:41,782 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=580 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/19d60cab09b84640a603bac99f57fa63 
2024-11-07T17:15:41,789 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/e4c904c5b6fa4037ad56440ddb685393 is 50, key is test_row_0/C:col10/1730999740556/Put/seqid=0 2024-11-07T17:15:41,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-07T17:15:41,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741961_1137 (size=12301) 2024-11-07T17:15:42,195 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=580 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/e4c904c5b6fa4037ad56440ddb685393 2024-11-07T17:15:42,200 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/A/14eda177307045c28b4cb771bc4239b3 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/14eda177307045c28b4cb771bc4239b3 2024-11-07T17:15:42,205 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/14eda177307045c28b4cb771bc4239b3, entries=150, sequenceid=580, filesize=12.0 K 2024-11-07T17:15:42,206 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/B/19d60cab09b84640a603bac99f57fa63 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/19d60cab09b84640a603bac99f57fa63 2024-11-07T17:15:42,210 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/19d60cab09b84640a603bac99f57fa63, entries=150, sequenceid=580, filesize=12.0 K 2024-11-07T17:15:42,211 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/.tmp/C/e4c904c5b6fa4037ad56440ddb685393 as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e4c904c5b6fa4037ad56440ddb685393 2024-11-07T17:15:42,216 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e4c904c5b6fa4037ad56440ddb685393, entries=150, sequenceid=580, filesize=12.0 K 2024-11-07T17:15:42,217 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 852ea2728c497a9e191625c6cb13c906 in 867ms, sequenceid=580, compaction requested=true 2024-11-07T17:15:42,218 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8b77b00435a548979b1f95b2525563b8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/386a7c14032b4fa9b2ceab6f741f418a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/e195725e858a435baab24376d4267640, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/451c95139e564769aff35e505af0d30a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/39c16f66d4d1442a85bb8c9eb897afc3, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/cfb2d5834c8748549be25c5a3e340039, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/5d33365a07eb41eaa9c5c2d381bc66bf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/475e4da76a394b258c228c9bae8cfbbd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/3800882c54ef462c8420596ab9fa3eac, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/22d4b6e1206d4ea5b69ebfa2d78a8b04, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c6a6ce4ea9404736ab586cdb5224694f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8a4bde71b0b34b59ad4e8bcd9521dcfd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/cb11707b8e9e48a3b0c3382dd6f755c5, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/a49a466b2c0b4338bab13e1728941add, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c70c6944b3e14b8e84b8bb72c3967c3f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/7c8d4867859c4155851a398670e3edf0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/0299fd4d7eab4e399a29037afdfcdd8e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/816c63a6740a45628038fddbef43837c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/1ddf3ed330404dc1a312f65513a446cb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/9e0f574e7eb446fdb0d6006cab96bbaa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/b955d5b6f05c4553a3541bd89f955b17, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/2a4dba6a571d4081bf026c68b39393cf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/b302196dcf754a26aa968d4fb81beec1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/711eaedab86244f19b70cfdfd8a49985, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c3fa624f740549cf8ad067a416f47da7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/11c10215d41f4cd48dfc4fc69219c01e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/64446b71e2944ab2982013a54b00372b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/53157fe3274f445aa4a09f1d47ead136, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/ec787500da6141d5b8fed3409b465d1f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/10cf8db124ed46588304ed100c7c88d3, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/f5e219b358e44f65952506c6d88e199f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/2b1f7ed46cb44807a06975fa65776747, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/adccef0791e74db0ad342cc15d883920, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/a6fd25756e324a32ac6640f696b98c59, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/d1a38a40494b41d784aaa98c40f840e7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8025a753b32e4b238e5ae5a5b5b6dbaa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/7fe4ef182d2a474cb6812a6d43a68de5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/72744eae30a94cec8a099e462ef05b90] to archive 2024-11-07T17:15:42,221 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T17:15:42,227 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8b77b00435a548979b1f95b2525563b8 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8b77b00435a548979b1f95b2525563b8 2024-11-07T17:15:42,229 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/386a7c14032b4fa9b2ceab6f741f418a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/386a7c14032b4fa9b2ceab6f741f418a 2024-11-07T17:15:42,231 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/e195725e858a435baab24376d4267640 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/e195725e858a435baab24376d4267640 2024-11-07T17:15:42,232 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/451c95139e564769aff35e505af0d30a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/451c95139e564769aff35e505af0d30a 2024-11-07T17:15:42,233 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/39c16f66d4d1442a85bb8c9eb897afc3 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/39c16f66d4d1442a85bb8c9eb897afc3 2024-11-07T17:15:42,235 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/cfb2d5834c8748549be25c5a3e340039 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/cfb2d5834c8748549be25c5a3e340039 2024-11-07T17:15:42,236 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/5d33365a07eb41eaa9c5c2d381bc66bf to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/5d33365a07eb41eaa9c5c2d381bc66bf 2024-11-07T17:15:42,238 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/475e4da76a394b258c228c9bae8cfbbd to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/475e4da76a394b258c228c9bae8cfbbd 2024-11-07T17:15:42,239 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/3800882c54ef462c8420596ab9fa3eac to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/3800882c54ef462c8420596ab9fa3eac 2024-11-07T17:15:42,241 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/22d4b6e1206d4ea5b69ebfa2d78a8b04 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/22d4b6e1206d4ea5b69ebfa2d78a8b04 2024-11-07T17:15:42,242 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c6a6ce4ea9404736ab586cdb5224694f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c6a6ce4ea9404736ab586cdb5224694f 2024-11-07T17:15:42,243 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8a4bde71b0b34b59ad4e8bcd9521dcfd to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8a4bde71b0b34b59ad4e8bcd9521dcfd 2024-11-07T17:15:42,245 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/cb11707b8e9e48a3b0c3382dd6f755c5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/cb11707b8e9e48a3b0c3382dd6f755c5 2024-11-07T17:15:42,246 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/a49a466b2c0b4338bab13e1728941add to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/a49a466b2c0b4338bab13e1728941add 2024-11-07T17:15:42,248 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c70c6944b3e14b8e84b8bb72c3967c3f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c70c6944b3e14b8e84b8bb72c3967c3f 2024-11-07T17:15:42,249 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/7c8d4867859c4155851a398670e3edf0 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/7c8d4867859c4155851a398670e3edf0 2024-11-07T17:15:42,251 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/0299fd4d7eab4e399a29037afdfcdd8e to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/0299fd4d7eab4e399a29037afdfcdd8e 2024-11-07T17:15:42,252 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/816c63a6740a45628038fddbef43837c to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/816c63a6740a45628038fddbef43837c 2024-11-07T17:15:42,253 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/1ddf3ed330404dc1a312f65513a446cb to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/1ddf3ed330404dc1a312f65513a446cb 2024-11-07T17:15:42,255 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/9e0f574e7eb446fdb0d6006cab96bbaa to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/9e0f574e7eb446fdb0d6006cab96bbaa 2024-11-07T17:15:42,256 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/b955d5b6f05c4553a3541bd89f955b17 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/b955d5b6f05c4553a3541bd89f955b17 2024-11-07T17:15:42,257 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/2a4dba6a571d4081bf026c68b39393cf to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/2a4dba6a571d4081bf026c68b39393cf 2024-11-07T17:15:42,259 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/b302196dcf754a26aa968d4fb81beec1 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/b302196dcf754a26aa968d4fb81beec1 2024-11-07T17:15:42,261 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/711eaedab86244f19b70cfdfd8a49985 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/711eaedab86244f19b70cfdfd8a49985 2024-11-07T17:15:42,262 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c3fa624f740549cf8ad067a416f47da7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/c3fa624f740549cf8ad067a416f47da7 2024-11-07T17:15:42,263 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/11c10215d41f4cd48dfc4fc69219c01e to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/11c10215d41f4cd48dfc4fc69219c01e 2024-11-07T17:15:42,265 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/64446b71e2944ab2982013a54b00372b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/64446b71e2944ab2982013a54b00372b 2024-11-07T17:15:42,266 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/53157fe3274f445aa4a09f1d47ead136 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/53157fe3274f445aa4a09f1d47ead136 2024-11-07T17:15:42,267 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/ec787500da6141d5b8fed3409b465d1f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/ec787500da6141d5b8fed3409b465d1f 2024-11-07T17:15:42,269 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/10cf8db124ed46588304ed100c7c88d3 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/10cf8db124ed46588304ed100c7c88d3 2024-11-07T17:15:42,271 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/f5e219b358e44f65952506c6d88e199f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/f5e219b358e44f65952506c6d88e199f 2024-11-07T17:15:42,272 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/2b1f7ed46cb44807a06975fa65776747 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/2b1f7ed46cb44807a06975fa65776747 2024-11-07T17:15:42,274 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/adccef0791e74db0ad342cc15d883920 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/adccef0791e74db0ad342cc15d883920 2024-11-07T17:15:42,275 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/a6fd25756e324a32ac6640f696b98c59 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/a6fd25756e324a32ac6640f696b98c59 2024-11-07T17:15:42,277 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/d1a38a40494b41d784aaa98c40f840e7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/d1a38a40494b41d784aaa98c40f840e7 2024-11-07T17:15:42,278 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8025a753b32e4b238e5ae5a5b5b6dbaa to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8025a753b32e4b238e5ae5a5b5b6dbaa 2024-11-07T17:15:42,280 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/7fe4ef182d2a474cb6812a6d43a68de5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/7fe4ef182d2a474cb6812a6d43a68de5 2024-11-07T17:15:42,281 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/72744eae30a94cec8a099e462ef05b90 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/72744eae30a94cec8a099e462ef05b90 2024-11-07T17:15:42,300 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/60a90898901c43fe9d4fcac77fa94ddf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/fb6bfc4adbaa43daba384232877f9741, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/2aecf4c241eb49389b6b7fc7313ce7ed, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/84b7f367ba8b45868c4e0959842475c6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/59123d9b9e724c2497084651d280ef87, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/a34d49e3e34f4471a784edeb0dbdd8e5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/34a69534615048cd81635b2408003a31, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/7d95bd4bb482470585dcf7043b17147c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/74700daf86154f7c955c2d13ee44a690, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/1b5506c4e5004ceea1f412d69302f0dd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/998509a81085482cba5bcb611d836c0a, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/f1695239624c4fd48b3c0212eeabefd1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/a18b7db1afc145ea9e84ddcc39a538d7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/141047d3510f42649e96065a39b1246f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/bfa2cc754cd64e5ebcd0e840ec853f63, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/65c367a95c674ca9938cda40fa220a19, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/9a131fd12049464dae1436ac87f775a7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/4060fadf9395459db992d3efa5fed97e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/5c77a2b80dc74dc0ba35cf45d1657b5c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/301a2eb2ddf147288c7fe66e577d50ff, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ccfd052970e04f5482d5ceeb51e3af21, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/05d5250386ae4cf08606318a432cc60f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/bb2d25087f0c471faef5172ab1d04d6c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/4405b68c67104189bea93839c28bd1bd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ec79e4d2e1d442f09e5d172680b02b78, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/82a334885ef14a159027299cbcf79e0e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/677b57d6777f47a281ebad1457c44c6b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ed1ea9c826ae43e9810adafe62e5809a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/8efa26b2ec0b4457b309ed49585bba57, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/e6fa8db01c9942acb840bf5e06e31b01, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/6ed68a1f03194ee1baf3ebd4a2e62355, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/278d855049274f2bb59cd7a7446573ac, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/20069430f4af49a8877e13f24c5026fe, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/f837cee91c774e338a3982850aa1ba3d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/64f51bd8989345588b735bc9cd89c77d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/0f5c35f268ed4c5ea14c4edb1a48063c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/b37904531f7e449790ecbdb08934254d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/913b62f283384db39092c8056b5e5028] to archive 2024-11-07T17:15:42,302 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T17:15:42,304 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/60a90898901c43fe9d4fcac77fa94ddf to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/60a90898901c43fe9d4fcac77fa94ddf 2024-11-07T17:15:42,306 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/fb6bfc4adbaa43daba384232877f9741 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/fb6bfc4adbaa43daba384232877f9741 2024-11-07T17:15:42,307 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/2aecf4c241eb49389b6b7fc7313ce7ed to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/2aecf4c241eb49389b6b7fc7313ce7ed 2024-11-07T17:15:42,309 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/84b7f367ba8b45868c4e0959842475c6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/84b7f367ba8b45868c4e0959842475c6 2024-11-07T17:15:42,319 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/59123d9b9e724c2497084651d280ef87 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/59123d9b9e724c2497084651d280ef87 2024-11-07T17:15:42,321 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/a34d49e3e34f4471a784edeb0dbdd8e5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/a34d49e3e34f4471a784edeb0dbdd8e5 2024-11-07T17:15:42,322 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/34a69534615048cd81635b2408003a31 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/34a69534615048cd81635b2408003a31 2024-11-07T17:15:42,324 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/7d95bd4bb482470585dcf7043b17147c to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/7d95bd4bb482470585dcf7043b17147c 2024-11-07T17:15:42,326 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/74700daf86154f7c955c2d13ee44a690 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/74700daf86154f7c955c2d13ee44a690 2024-11-07T17:15:42,327 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/1b5506c4e5004ceea1f412d69302f0dd to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/1b5506c4e5004ceea1f412d69302f0dd 2024-11-07T17:15:42,328 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/998509a81085482cba5bcb611d836c0a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/998509a81085482cba5bcb611d836c0a 2024-11-07T17:15:42,329 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/f1695239624c4fd48b3c0212eeabefd1 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/f1695239624c4fd48b3c0212eeabefd1 2024-11-07T17:15:42,330 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/a18b7db1afc145ea9e84ddcc39a538d7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/a18b7db1afc145ea9e84ddcc39a538d7 2024-11-07T17:15:42,332 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/141047d3510f42649e96065a39b1246f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/141047d3510f42649e96065a39b1246f 2024-11-07T17:15:42,333 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/bfa2cc754cd64e5ebcd0e840ec853f63 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/bfa2cc754cd64e5ebcd0e840ec853f63 2024-11-07T17:15:42,334 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/65c367a95c674ca9938cda40fa220a19 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/65c367a95c674ca9938cda40fa220a19 2024-11-07T17:15:42,335 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/9a131fd12049464dae1436ac87f775a7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/9a131fd12049464dae1436ac87f775a7 2024-11-07T17:15:42,337 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/4060fadf9395459db992d3efa5fed97e to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/4060fadf9395459db992d3efa5fed97e 2024-11-07T17:15:42,338 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/5c77a2b80dc74dc0ba35cf45d1657b5c to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/5c77a2b80dc74dc0ba35cf45d1657b5c 2024-11-07T17:15:42,340 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/301a2eb2ddf147288c7fe66e577d50ff to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/301a2eb2ddf147288c7fe66e577d50ff 2024-11-07T17:15:42,341 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ccfd052970e04f5482d5ceeb51e3af21 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ccfd052970e04f5482d5ceeb51e3af21 2024-11-07T17:15:42,342 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/05d5250386ae4cf08606318a432cc60f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/05d5250386ae4cf08606318a432cc60f 2024-11-07T17:15:42,344 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/bb2d25087f0c471faef5172ab1d04d6c to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/bb2d25087f0c471faef5172ab1d04d6c 2024-11-07T17:15:42,345 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/4405b68c67104189bea93839c28bd1bd to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/4405b68c67104189bea93839c28bd1bd 2024-11-07T17:15:42,347 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ec79e4d2e1d442f09e5d172680b02b78 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ec79e4d2e1d442f09e5d172680b02b78 2024-11-07T17:15:42,348 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/82a334885ef14a159027299cbcf79e0e to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/82a334885ef14a159027299cbcf79e0e 2024-11-07T17:15:42,349 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/677b57d6777f47a281ebad1457c44c6b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/677b57d6777f47a281ebad1457c44c6b 2024-11-07T17:15:42,351 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ed1ea9c826ae43e9810adafe62e5809a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/ed1ea9c826ae43e9810adafe62e5809a 2024-11-07T17:15:42,352 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/8efa26b2ec0b4457b309ed49585bba57 to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/8efa26b2ec0b4457b309ed49585bba57 2024-11-07T17:15:42,353 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/e6fa8db01c9942acb840bf5e06e31b01 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/e6fa8db01c9942acb840bf5e06e31b01 2024-11-07T17:15:42,355 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/6ed68a1f03194ee1baf3ebd4a2e62355 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/6ed68a1f03194ee1baf3ebd4a2e62355 2024-11-07T17:15:42,356 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/278d855049274f2bb59cd7a7446573ac to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/278d855049274f2bb59cd7a7446573ac 2024-11-07T17:15:42,357 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/20069430f4af49a8877e13f24c5026fe to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/20069430f4af49a8877e13f24c5026fe 2024-11-07T17:15:42,358 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/f837cee91c774e338a3982850aa1ba3d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/f837cee91c774e338a3982850aa1ba3d 2024-11-07T17:15:42,360 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/64f51bd8989345588b735bc9cd89c77d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/64f51bd8989345588b735bc9cd89c77d 2024-11-07T17:15:42,361 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/0f5c35f268ed4c5ea14c4edb1a48063c to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/0f5c35f268ed4c5ea14c4edb1a48063c 2024-11-07T17:15:42,362 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/b37904531f7e449790ecbdb08934254d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/b37904531f7e449790ecbdb08934254d 2024-11-07T17:15:42,363 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/913b62f283384db39092c8056b5e5028 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/913b62f283384db39092c8056b5e5028 2024-11-07T17:15:42,365 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b0a2197ec0dc4fb5b49b565a1c806c00, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/c5521444a8284f179ce6ed2219016bfd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/097857a35e0a4774acaa2723f8df72ba, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/36a18fb00539472db643838eb06b6eee, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/fcae88eca3314a6d8cbfbbbccffc4618, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/cbe51743b02c42338b5cbc412e417ad6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/cf433b1ea72b410590a7d6458b041976, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/bb4b96ba9b524815abe9a2323cd31e35, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/bef7c98a8216415fb5b94bf0718ead21, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b99afde49bd34d1aae0a91cf669a513c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7d6e9ee3aa6b43aaa6c9a7a21604339d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7bc714f8851d40c58a7cc782cd47316a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/91d4d8ffd5474c2aa57af84972c1c0ae, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/800932983a914ffc885ee6a6174c756f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e0f4ed02be4347fcbfb4f44ff65b3401, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/667fee38343a4a4391585419bed85ca7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/71019d55740a49e8a0468bf0166f8ebe, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b525123669dd4360a85cb5d7e875d6ea, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/14bd30a5788a40b7b938081ac903703d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/6a44ebdf3c4b415eafadf8154103b8bf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/663f4e8aaa7b4cf3807429a11cf84ccb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/eba14396b66c484fafe83a1e619519a5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b6e8dbc92cc846578a6ff97b421fd43a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/c6e85087fa094976a98cfa4af51c7a64, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7024083885f04a82afad72034ecc08c7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e3358951dd2d4774a324af1a5e0d41f5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/4cc4bdbc000b45fbbf8f32d257ee2b02, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b9c5cda93e004d07ac5fbd1d8388f3eb, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/0af7cf675388423191e480d80666d52b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/9da14904ada44068bb3cab8b18b53ba9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/a4da75b3807546b28d91def410867520, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b0489813d1774583a1c16735919e8160, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/0cfa2926af6348c581f79bbbfba925ed, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/8b4cbcb54b3a491ca09652ad73168bc8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/3a9b032d8c444268ac08c558ea31d63d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e8bfc01b24884b2188cec2cd610613aa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/5dd2d4422e684b96b8419bcb979a49b9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/d6faacd55f4d43c8a8f35fc5b5adc165] to archive 2024-11-07T17:15:42,366 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T17:15:42,367 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b0a2197ec0dc4fb5b49b565a1c806c00 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b0a2197ec0dc4fb5b49b565a1c806c00 2024-11-07T17:15:42,368 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/c5521444a8284f179ce6ed2219016bfd to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/c5521444a8284f179ce6ed2219016bfd 2024-11-07T17:15:42,369 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/097857a35e0a4774acaa2723f8df72ba to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/097857a35e0a4774acaa2723f8df72ba 2024-11-07T17:15:42,370 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/36a18fb00539472db643838eb06b6eee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/36a18fb00539472db643838eb06b6eee 2024-11-07T17:15:42,371 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/fcae88eca3314a6d8cbfbbbccffc4618 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/fcae88eca3314a6d8cbfbbbccffc4618 2024-11-07T17:15:42,372 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/cbe51743b02c42338b5cbc412e417ad6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/cbe51743b02c42338b5cbc412e417ad6 2024-11-07T17:15:42,373 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/cf433b1ea72b410590a7d6458b041976 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/cf433b1ea72b410590a7d6458b041976 2024-11-07T17:15:42,374 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/bb4b96ba9b524815abe9a2323cd31e35 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/bb4b96ba9b524815abe9a2323cd31e35 2024-11-07T17:15:42,375 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/bef7c98a8216415fb5b94bf0718ead21 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/bef7c98a8216415fb5b94bf0718ead21 2024-11-07T17:15:42,377 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b99afde49bd34d1aae0a91cf669a513c to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b99afde49bd34d1aae0a91cf669a513c 2024-11-07T17:15:42,378 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7d6e9ee3aa6b43aaa6c9a7a21604339d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7d6e9ee3aa6b43aaa6c9a7a21604339d 2024-11-07T17:15:42,379 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7bc714f8851d40c58a7cc782cd47316a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7bc714f8851d40c58a7cc782cd47316a 2024-11-07T17:15:42,381 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/91d4d8ffd5474c2aa57af84972c1c0ae to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/91d4d8ffd5474c2aa57af84972c1c0ae 2024-11-07T17:15:42,381 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/800932983a914ffc885ee6a6174c756f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/800932983a914ffc885ee6a6174c756f 2024-11-07T17:15:42,383 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e0f4ed02be4347fcbfb4f44ff65b3401 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e0f4ed02be4347fcbfb4f44ff65b3401 2024-11-07T17:15:42,384 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/667fee38343a4a4391585419bed85ca7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/667fee38343a4a4391585419bed85ca7 2024-11-07T17:15:42,385 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/71019d55740a49e8a0468bf0166f8ebe to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/71019d55740a49e8a0468bf0166f8ebe 2024-11-07T17:15:42,386 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b525123669dd4360a85cb5d7e875d6ea to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b525123669dd4360a85cb5d7e875d6ea 2024-11-07T17:15:42,388 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/14bd30a5788a40b7b938081ac903703d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/14bd30a5788a40b7b938081ac903703d 2024-11-07T17:15:42,389 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/6a44ebdf3c4b415eafadf8154103b8bf to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/6a44ebdf3c4b415eafadf8154103b8bf 2024-11-07T17:15:42,390 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/663f4e8aaa7b4cf3807429a11cf84ccb to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/663f4e8aaa7b4cf3807429a11cf84ccb 2024-11-07T17:15:42,391 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/eba14396b66c484fafe83a1e619519a5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/eba14396b66c484fafe83a1e619519a5 2024-11-07T17:15:42,393 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b6e8dbc92cc846578a6ff97b421fd43a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b6e8dbc92cc846578a6ff97b421fd43a 2024-11-07T17:15:42,394 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/c6e85087fa094976a98cfa4af51c7a64 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/c6e85087fa094976a98cfa4af51c7a64 2024-11-07T17:15:42,395 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7024083885f04a82afad72034ecc08c7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/7024083885f04a82afad72034ecc08c7 2024-11-07T17:15:42,397 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e3358951dd2d4774a324af1a5e0d41f5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e3358951dd2d4774a324af1a5e0d41f5 2024-11-07T17:15:42,398 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/4cc4bdbc000b45fbbf8f32d257ee2b02 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/4cc4bdbc000b45fbbf8f32d257ee2b02 2024-11-07T17:15:42,399 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b9c5cda93e004d07ac5fbd1d8388f3eb to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b9c5cda93e004d07ac5fbd1d8388f3eb 2024-11-07T17:15:42,400 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/0af7cf675388423191e480d80666d52b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/0af7cf675388423191e480d80666d52b 2024-11-07T17:15:42,401 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/9da14904ada44068bb3cab8b18b53ba9 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/9da14904ada44068bb3cab8b18b53ba9 2024-11-07T17:15:42,402 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/a4da75b3807546b28d91def410867520 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/a4da75b3807546b28d91def410867520 2024-11-07T17:15:42,404 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b0489813d1774583a1c16735919e8160 to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b0489813d1774583a1c16735919e8160 2024-11-07T17:15:42,405 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/0cfa2926af6348c581f79bbbfba925ed to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/0cfa2926af6348c581f79bbbfba925ed 2024-11-07T17:15:42,406 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/8b4cbcb54b3a491ca09652ad73168bc8 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/8b4cbcb54b3a491ca09652ad73168bc8 2024-11-07T17:15:42,407 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/3a9b032d8c444268ac08c558ea31d63d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/3a9b032d8c444268ac08c558ea31d63d 2024-11-07T17:15:42,409 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e8bfc01b24884b2188cec2cd610613aa to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e8bfc01b24884b2188cec2cd610613aa 2024-11-07T17:15:42,410 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/5dd2d4422e684b96b8419bcb979a49b9 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/5dd2d4422e684b96b8419bcb979a49b9 2024-11-07T17:15:42,412 DEBUG [StoreCloser-TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/d6faacd55f4d43c8a8f35fc5b5adc165 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/d6faacd55f4d43c8a8f35fc5b5adc165 2024-11-07T17:15:42,417 DEBUG 
[RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/recovered.edits/583.seqid, newMaxSeqId=583, maxSeqId=1 2024-11-07T17:15:42,420 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906. 2024-11-07T17:15:42,420 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1635): Region close journal for 852ea2728c497a9e191625c6cb13c906: 2024-11-07T17:15:42,422 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(170): Closed 852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:42,422 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=852ea2728c497a9e191625c6cb13c906, regionState=CLOSED 2024-11-07T17:15:42,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-07T17:15:42,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseRegionProcedure 852ea2728c497a9e191625c6cb13c906, server=3a0fde618c86,37403,1730999712734 in 1.7220 sec 2024-11-07T17:15:42,426 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-11-07T17:15:42,427 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=852ea2728c497a9e191625c6cb13c906, UNASSIGN in 1.7270 sec 2024-11-07T17:15:42,429 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-07T17:15:42,429 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.7330 sec 2024-11-07T17:15:42,430 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999742430"}]},"ts":"1730999742430"} 2024-11-07T17:15:42,431 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-07T17:15:42,433 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-07T17:15:42,435 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.7520 sec 2024-11-07T17:15:42,501 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-07T17:15:42,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-07T17:15:42,795 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-07T17:15:42,799 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete 
TestAcidGuarantees 2024-11-07T17:15:42,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:15:42,806 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:15:42,809 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=38, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:15:42,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-07T17:15:42,815 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:42,820 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/recovered.edits] 2024-11-07T17:15:42,824 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/14eda177307045c28b4cb771bc4239b3 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/14eda177307045c28b4cb771bc4239b3 2024-11-07T17:15:42,827 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/72e877bc577647d2b0d4025c3da8cced to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/72e877bc577647d2b0d4025c3da8cced 2024-11-07T17:15:42,829 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8e1a9ca0fad54d4682e2849b8bcd7dd8 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/A/8e1a9ca0fad54d4682e2849b8bcd7dd8 2024-11-07T17:15:42,840 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/19d60cab09b84640a603bac99f57fa63 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/19d60cab09b84640a603bac99f57fa63 2024-11-07T17:15:42,842 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/8ca7903c8d4d48e9a52b0c9dbd31889b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/8ca7903c8d4d48e9a52b0c9dbd31889b 2024-11-07T17:15:42,844 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/c4fcdd839b674aab80017b94a589730e to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/B/c4fcdd839b674aab80017b94a589730e 2024-11-07T17:15:42,849 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/346eb86d1a364bc387538e2e30b5acfb to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/346eb86d1a364bc387538e2e30b5acfb 2024-11-07T17:15:42,851 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b15f8a3b8dbf453eb0f7fb2b9aa92bf3 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/b15f8a3b8dbf453eb0f7fb2b9aa92bf3 2024-11-07T17:15:42,857 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e4c904c5b6fa4037ad56440ddb685393 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/C/e4c904c5b6fa4037ad56440ddb685393 2024-11-07T17:15:42,860 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/recovered.edits/583.seqid to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906/recovered.edits/583.seqid 2024-11-07T17:15:42,861 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/852ea2728c497a9e191625c6cb13c906 2024-11-07T17:15:42,861 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-07T17:15:42,867 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=38, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:15:42,873 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-07T17:15:42,877 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-07T17:15:42,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-07T17:15:42,918 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-07T17:15:42,921 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=38, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:15:42,921 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-07T17:15:42,921 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730999742921"}]},"ts":"9223372036854775807"} 2024-11-07T17:15:42,925 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-07T17:15:42,925 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 852ea2728c497a9e191625c6cb13c906, NAME => 'TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906.', STARTKEY => '', ENDKEY => ''}] 2024-11-07T17:15:42,925 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-07T17:15:42,926 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730999742925"}]},"ts":"9223372036854775807"} 2024-11-07T17:15:42,932 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-07T17:15:42,935 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=38, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:15:42,936 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 135 msec 2024-11-07T17:15:43,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-07T17:15:43,113 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 38 completed 2024-11-07T17:15:43,129 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=239 (was 219) Potentially hanging thread: hconnection-0x3b59e39d-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1998448365_22 at /127.0.0.1:52562 [Waiting for operation #339] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3b59e39d-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1742492692_22 at /127.0.0.1:50088 [Waiting for operation #318] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3b59e39d-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3b59e39d-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;3a0fde618c86:37403-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1998448365_22 at /127.0.0.1:41034 [Waiting for operation #117] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=356 (was 202) - SystemLoadAverage LEAK? -, ProcessCount=12 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=3217 (was 3832) 2024-11-07T17:15:43,142 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=239, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=356, ProcessCount=11, AvailableMemoryMB=3215 2024-11-07T17:15:43,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
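The ResourceChecker block above is the harness's before/after accounting (thread count, open file descriptors, load average) used to flag leaks between test methods, and the TableDescriptorChecker warning is expected for this run: the test operates with a deliberately tiny flush size (131072 bytes), set either as MEMSTORE_FLUSHSIZE on the table descriptor or via "hbase.hregion.memstore.flush.size", so that flushes fire constantly during the atomicity checks. The log does not show which mechanism is used; the following is a minimal, hypothetical sketch of the two usual ways to arrive at that value.

```java
// Hypothetical sketch only: two common ways to end up with the 128 KB flush size that
// TableDescriptorChecker warns about above. The log does not show which mechanism
// this test actually uses.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallFlushSizeSketch {
  public static void main(String[] args) {
    // Site-wide override: every region flushes once a memstore reaches 128 KB.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 131072L);

    // Per-table override: the MEMSTORE_FLUSHSIZE attribute carried in the table
    // descriptor, which is what the checker inspects on create/modify.
    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setMemStoreFlushSize(131072L);
    System.out.println(builder.build());
  }
}
```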
2024-11-07T17:15:43,144 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T17:15:43,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-07T17:15:43,146 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T17:15:43,146 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:43,146 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 39 2024-11-07T17:15:43,147 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T17:15:43,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-07T17:15:43,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741962_1138 (size=960) 2024-11-07T17:15:43,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-07T17:15:43,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-07T17:15:43,564 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17 2024-11-07T17:15:43,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741963_1139 (size=53) 2024-11-07T17:15:43,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-07T17:15:43,973 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:15:43,973 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 5c144e9b474530f0a58afb1f68827ff6, disabling compactions & flushes 2024-11-07T17:15:43,974 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:43,974 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:43,974 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. after waiting 0 ms 2024-11-07T17:15:43,974 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:43,974 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
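The create request logged above (pid=39) builds a three-family table, A/B/C, each with a single version, ROW bloom filters and 64 KB blocks, plus the table-level metadata key that selects the BASIC compacting memstore. Below is a hypothetical client-side equivalent using the HBase Admin API; the values mirror the logged descriptor, but the test's own helper code is not shown in this log.

```java
// Hypothetical client-side equivalent of the create request logged above.
// Family names and attribute values are taken from the log; the surrounding
// code is an assumption.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // Table-level attribute seen in the log's TABLE_ATTRIBUTES => METADATA block.
          .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
      for (String family : new String[] {"A", "B", "C"}) {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)                  // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)            // BLOCKSIZE => 64 KB
            .build();
        table.setColumnFamily(cf);
      }
      admin.createTable(table.build()); // blocks until the CreateTableProcedure completes
    }
  }
}
```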
2024-11-07T17:15:43,974 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:43,975 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T17:15:43,975 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1730999743975"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730999743975"}]},"ts":"1730999743975"} 2024-11-07T17:15:43,976 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-07T17:15:43,977 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T17:15:43,977 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999743977"}]},"ts":"1730999743977"} 2024-11-07T17:15:43,978 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-07T17:15:43,981 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5c144e9b474530f0a58afb1f68827ff6, ASSIGN}] 2024-11-07T17:15:43,982 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5c144e9b474530f0a58afb1f68827ff6, ASSIGN 2024-11-07T17:15:43,983 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5c144e9b474530f0a58afb1f68827ff6, ASSIGN; state=OFFLINE, location=3a0fde618c86,37403,1730999712734; forceNewPlan=false, retain=false 2024-11-07T17:15:44,134 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=5c144e9b474530f0a58afb1f68827ff6, regionState=OPENING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:44,135 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; OpenRegionProcedure 5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:15:44,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-07T17:15:44,287 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:44,291 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:44,291 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7285): Opening region: {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} 2024-11-07T17:15:44,292 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:44,292 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:15:44,292 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7327): checking encryption for 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:44,292 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7330): checking classloading for 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:44,293 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:44,295 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:15:44,295 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5c144e9b474530f0a58afb1f68827ff6 columnFamilyName A 2024-11-07T17:15:44,295 DEBUG [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:44,295 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.HStore(327): Store=5c144e9b474530f0a58afb1f68827ff6/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:15:44,296 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:44,297 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:15:44,297 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5c144e9b474530f0a58afb1f68827ff6 columnFamilyName B 2024-11-07T17:15:44,297 DEBUG [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:44,298 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.HStore(327): Store=5c144e9b474530f0a58afb1f68827ff6/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:15:44,298 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:44,299 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:15:44,299 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5c144e9b474530f0a58afb1f68827ff6 columnFamilyName C 2024-11-07T17:15:44,299 DEBUG [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:44,300 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.HStore(327): Store=5c144e9b474530f0a58afb1f68827ff6/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:15:44,300 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:44,301 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:44,301 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:44,302 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T17:15:44,304 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1085): writing seq id for 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:44,305 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T17:15:44,306 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1102): Opened 5c144e9b474530f0a58afb1f68827ff6; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74285690, jitterRate=0.1069430410861969}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T17:15:44,306 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1001): Region open journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:44,307 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., pid=41, masterSystemTime=1730999744287 2024-11-07T17:15:44,309 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:44,309 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
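Each store in the opened region is instantiated as a CompactingMemStore with the BASIC compactor and a 2 MB in-memory flush threshold, which follows from the 'hbase.hregion.compacting.memstore.type' => 'BASIC' table attribute. For reference, the same policy can also be selected per column family; the sketch below is hypothetical and is not how this test configures it.

```java
// Hypothetical: choosing the BASIC in-memory compaction policy on a single column
// family, as an alternative to the table-level metadata key used in this log.
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class BasicInMemoryCompactionSketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC) // CompactingMemStore, BASIC compactor
        .build();
    System.out.println(cf);
  }
}
```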
2024-11-07T17:15:44,309 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=5c144e9b474530f0a58afb1f68827ff6, regionState=OPEN, openSeqNum=2, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:44,312 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-07T17:15:44,312 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; OpenRegionProcedure 5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 in 176 msec 2024-11-07T17:15:44,313 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-11-07T17:15:44,313 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5c144e9b474530f0a58afb1f68827ff6, ASSIGN in 331 msec 2024-11-07T17:15:44,314 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T17:15:44,314 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999744314"}]},"ts":"1730999744314"} 2024-11-07T17:15:44,315 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-07T17:15:44,317 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T17:15:44,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1730 sec 2024-11-07T17:15:45,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-07T17:15:45,254 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 39 completed 2024-11-07T17:15:45,256 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x118b007e to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5d29de25 2024-11-07T17:15:45,259 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a378df6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:45,261 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:45,264 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34262, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:45,265 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T17:15:45,267 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39358, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T17:15:45,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-07T17:15:45,272 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T17:15:45,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=42, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-07T17:15:45,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741964_1140 (size=996) 2024-11-07T17:15:45,549 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:39903 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:39903,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at 
org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:45,693 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-07T17:15:45,693 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-07T17:15:45,697 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T17:15:45,706 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5c144e9b474530f0a58afb1f68827ff6, REOPEN/MOVE}] 2024-11-07T17:15:45,707 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5c144e9b474530f0a58afb1f68827ff6, REOPEN/MOVE 2024-11-07T17:15:45,707 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=5c144e9b474530f0a58afb1f68827ff6, regionState=CLOSING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:45,709 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T17:15:45,709 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE; CloseRegionProcedure 5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:15:45,860 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:45,861 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(124): Close 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:45,861 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T17:15:45,861 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1681): Closing 5c144e9b474530f0a58afb1f68827ff6, disabling compactions & flushes 2024-11-07T17:15:45,861 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1703): Closing region 
TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:45,861 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:45,861 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. after waiting 0 ms 2024-11-07T17:15:45,861 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:45,866 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-07T17:15:45,867 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:45,867 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1635): Region close journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:45,867 WARN [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegionServer(3786): Not adding moved region record: 5c144e9b474530f0a58afb1f68827ff6 to self. 
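The modify request logged at 17:15:45,272 (pid=42) switches family A to MOB storage with a 4-byte threshold, and that descriptor change is what drives the REOPEN/MOVE seen in these entries: the region is closed (pid=45) and reopened (pid=46) so it picks up the new schema. A hypothetical client-side equivalent of that modification follows; the threshold and family name come from the log, the rest is a sketch.

```java
// Hypothetical equivalent of the logged modify request: family 'A' becomes MOB-enabled
// with a 4-byte threshold (cells larger than 4 bytes are written to MOB files).
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor current = admin.getDescriptor(name);
      TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
              .setMobEnabled(true)  // IS_MOB => 'true'
              .setMobThreshold(4L)  // MOB_THRESHOLD => '4'
              .build())
          .build();
      // modifyTable drives the ModifyTableProcedure / ReopenTableRegionsProcedure
      // chain (pids 42-46 in this log).
      admin.modifyTable(modified);
    }
  }
}
```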
2024-11-07T17:15:45,868 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(170): Closed 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:45,869 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=5c144e9b474530f0a58afb1f68827ff6, regionState=CLOSED 2024-11-07T17:15:45,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-11-07T17:15:45,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; CloseRegionProcedure 5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 in 161 msec 2024-11-07T17:15:45,872 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5c144e9b474530f0a58afb1f68827ff6, REOPEN/MOVE; state=CLOSED, location=3a0fde618c86,37403,1730999712734; forceNewPlan=false, retain=true 2024-11-07T17:15:46,023 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=5c144e9b474530f0a58afb1f68827ff6, regionState=OPENING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,024 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=44, state=RUNNABLE; OpenRegionProcedure 5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:15:46,176 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,179 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:46,179 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7285): Opening region: {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} 2024-11-07T17:15:46,180 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:46,180 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:15:46,180 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7327): checking encryption for 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:46,180 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7330): checking classloading for 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:46,182 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:46,183 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:15:46,188 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5c144e9b474530f0a58afb1f68827ff6 columnFamilyName A 2024-11-07T17:15:46,190 DEBUG [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:46,190 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.HStore(327): Store=5c144e9b474530f0a58afb1f68827ff6/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:15:46,191 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:46,191 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:15:46,192 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5c144e9b474530f0a58afb1f68827ff6 columnFamilyName B 2024-11-07T17:15:46,192 DEBUG [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:46,192 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.HStore(327): Store=5c144e9b474530f0a58afb1f68827ff6/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:15:46,192 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:46,193 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:15:46,193 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5c144e9b474530f0a58afb1f68827ff6 columnFamilyName C 2024-11-07T17:15:46,193 DEBUG [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:46,193 INFO [StoreOpener-5c144e9b474530f0a58afb1f68827ff6-1 {}] regionserver.HStore(327): Store=5c144e9b474530f0a58afb1f68827ff6/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:15:46,194 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:46,194 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:46,195 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:46,197 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T17:15:46,198 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1085): writing seq id for 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:46,199 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1102): Opened 5c144e9b474530f0a58afb1f68827ff6; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72690208, jitterRate=0.08316850662231445}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T17:15:46,199 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1001): Region open journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:46,200 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., pid=46, masterSystemTime=1730999746176 2024-11-07T17:15:46,202 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:46,202 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
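With the region back online under the MOB-enabled descriptor (next sequenceid=5), the test opens a batch of client connections and asks the master to flush the table, which shows up below as FlushTableProcedure pid=47; shortly afterwards the writer load pushes the region past its 512 KB memstore limit and puts start being rejected with RegionTooBusyException. Below is a minimal, hypothetical sketch of a client-side flush request roughly equivalent to the one logged; only the table name comes from the log.

```java
// Hypothetical client-side flush of the whole table, roughly equivalent to the
// "flush TestAcidGuarantees" request the master records below as pid=47.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAcidTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flush every region of the table; each region server writes its in-memory
      // A/B/C stores out to new store files.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```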
2024-11-07T17:15:46,202 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=5c144e9b474530f0a58afb1f68827ff6, regionState=OPEN, openSeqNum=5, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,205 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=44 2024-11-07T17:15:46,205 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=44, state=SUCCESS; OpenRegionProcedure 5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 in 179 msec 2024-11-07T17:15:46,206 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-11-07T17:15:46,206 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5c144e9b474530f0a58afb1f68827ff6, REOPEN/MOVE in 499 msec 2024-11-07T17:15:46,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-07T17:15:46,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 511 msec 2024-11-07T17:15:46,211 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 935 msec 2024-11-07T17:15:46,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=42 2024-11-07T17:15:46,219 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79982672 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2931c73e 2024-11-07T17:15:46,227 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bad2e85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:46,228 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b4bd1ba to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@491ea2ee 2024-11-07T17:15:46,231 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@328f994d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:46,232 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x454f1431 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@190853fc 2024-11-07T17:15:46,235 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a9306be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:46,236 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x505d5ccd to 
127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46114993 2024-11-07T17:15:46,239 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@465dc764, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:46,241 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x367f47f7 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2885d2d9 2024-11-07T17:15:46,243 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cb464a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:46,244 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22e911df to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78cafade 2024-11-07T17:15:46,246 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@152377d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:46,247 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b727d6e to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14c16cd4 2024-11-07T17:15:46,250 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a52344f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:46,251 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c7940d9 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@341384e 2024-11-07T17:15:46,254 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8ba8425, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:46,255 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c38ee58 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@26b120d9 2024-11-07T17:15:46,257 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7af61386, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:15:46,261 DEBUG 
[hconnection-0x5b774629-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:46,261 DEBUG [hconnection-0x761d908d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:46,263 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34272, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:46,263 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34270, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:46,264 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:46,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-07T17:15:46,266 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:46,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-07T17:15:46,267 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:46,267 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:46,268 DEBUG [hconnection-0x2c47e95c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:46,269 DEBUG [hconnection-0x2a49bace-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:46,269 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34286, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:46,270 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34298, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:46,272 DEBUG [hconnection-0x3199c48f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:46,273 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34306, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:46,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:46,276 DEBUG [hconnection-0x35e09396-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-07T17:15:46,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T17:15:46,276 DEBUG [hconnection-0x343da13f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:46,278 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34330, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:46,278 DEBUG [hconnection-0xc57fcc5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:46,278 DEBUG [hconnection-0x6f405467-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:15:46,278 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34314, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:46,279 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34342, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:46,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:15:46,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:46,280 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34348, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:15:46,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:15:46,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:46,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:15:46,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:46,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999806317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999806317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999806318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999806319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999806319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110760ff6d2f1c2f49af9a086dc4c9ed5e34_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999746273/Put/seqid=0 2024-11-07T17:15:46,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741965_1141 (size=12154) 2024-11-07T17:15:46,351 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:46,368 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110760ff6d2f1c2f49af9a086dc4c9ed5e34_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110760ff6d2f1c2f49af9a086dc4c9ed5e34_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:46,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-07T17:15:46,371 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/55c0cb1171174d248753dd1aa0658831, store: [table=TestAcidGuarantees 
family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:46,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/55c0cb1171174d248753dd1aa0658831 is 175, key is test_row_0/A:col10/1730999746273/Put/seqid=0 2024-11-07T17:15:46,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741966_1142 (size=30955) 2024-11-07T17:15:46,393 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=21, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/55c0cb1171174d248753dd1aa0658831 2024-11-07T17:15:46,418 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-07T17:15:46,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:46,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:46,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:46,419 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:46,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:46,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:46,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/29b518a8bff1485a81b9d38bf8dbf91a is 50, key is test_row_0/B:col10/1730999746273/Put/seqid=0 2024-11-07T17:15:46,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999806421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999806421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999806422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999806422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999806428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741967_1143 (size=12001) 2024-11-07T17:15:46,443 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/29b518a8bff1485a81b9d38bf8dbf91a 2024-11-07T17:15:46,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/9c4a9596311e4314b5fe1921a0fbe3a7 is 50, key is test_row_0/C:col10/1730999746273/Put/seqid=0 2024-11-07T17:15:46,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741968_1144 (size=12001) 2024-11-07T17:15:46,500 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/9c4a9596311e4314b5fe1921a0fbe3a7 2024-11-07T17:15:46,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/55c0cb1171174d248753dd1aa0658831 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/55c0cb1171174d248753dd1aa0658831 2024-11-07T17:15:46,515 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/55c0cb1171174d248753dd1aa0658831, entries=150, sequenceid=21, filesize=30.2 K 2024-11-07T17:15:46,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/29b518a8bff1485a81b9d38bf8dbf91a as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/29b518a8bff1485a81b9d38bf8dbf91a 2024-11-07T17:15:46,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/29b518a8bff1485a81b9d38bf8dbf91a, entries=150, sequenceid=21, filesize=11.7 K 2024-11-07T17:15:46,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/9c4a9596311e4314b5fe1921a0fbe3a7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9c4a9596311e4314b5fe1921a0fbe3a7 2024-11-07T17:15:46,531 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9c4a9596311e4314b5fe1921a0fbe3a7, entries=150, sequenceid=21, filesize=11.7 K 2024-11-07T17:15:46,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=120.76 KB/123660 for 5c144e9b474530f0a58afb1f68827ff6 in 256ms, sequenceid=21, compaction requested=false 2024-11-07T17:15:46,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:46,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-07T17:15:46,573 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,573 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-07T17:15:46,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:46,574 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-07T17:15:46,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:15:46,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:46,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:15:46,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:46,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:15:46,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:46,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107fc99fdc4272146acb1caa95f016f6e8e_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999746315/Put/seqid=0 2024-11-07T17:15:46,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741969_1145 (size=12154) 2024-11-07T17:15:46,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:46,605 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107fc99fdc4272146acb1caa95f016f6e8e_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fc99fdc4272146acb1caa95f016f6e8e_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:46,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/326686d5ea164bc79f9fe4fef6b96e24, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:46,607 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/326686d5ea164bc79f9fe4fef6b96e24 is 175, key is test_row_0/A:col10/1730999746315/Put/seqid=0 2024-11-07T17:15:46,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741970_1146 (size=30955) 2024-11-07T17:15:46,623 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/326686d5ea164bc79f9fe4fef6b96e24 2024-11-07T17:15:46,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:46,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:46,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/7f9f288bcb3c4b86b186c6b0e15eed7c is 50, key is test_row_0/B:col10/1730999746315/Put/seqid=0 2024-11-07T17:15:46,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999806651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999806652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999806655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999806655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999806657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741971_1147 (size=12001) 2024-11-07T17:15:46,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999806756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999806757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999806758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999806759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999806760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-07T17:15:46,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999806962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999806962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999806963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999806963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:46,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:46,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999806965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,067 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/7f9f288bcb3c4b86b186c6b0e15eed7c 2024-11-07T17:15:47,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/d98dd9a0c7944694aae18e88129b8804 is 50, key is test_row_0/C:col10/1730999746315/Put/seqid=0 2024-11-07T17:15:47,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741972_1148 (size=12001) 2024-11-07T17:15:47,104 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/d98dd9a0c7944694aae18e88129b8804 2024-11-07T17:15:47,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/326686d5ea164bc79f9fe4fef6b96e24 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/326686d5ea164bc79f9fe4fef6b96e24 2024-11-07T17:15:47,126 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/326686d5ea164bc79f9fe4fef6b96e24, entries=150, sequenceid=42, filesize=30.2 K 2024-11-07T17:15:47,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/7f9f288bcb3c4b86b186c6b0e15eed7c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/7f9f288bcb3c4b86b186c6b0e15eed7c 2024-11-07T17:15:47,132 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/7f9f288bcb3c4b86b186c6b0e15eed7c, entries=150, sequenceid=42, filesize=11.7 K 2024-11-07T17:15:47,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/d98dd9a0c7944694aae18e88129b8804 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/d98dd9a0c7944694aae18e88129b8804 2024-11-07T17:15:47,138 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/d98dd9a0c7944694aae18e88129b8804, entries=150, sequenceid=42, filesize=11.7 K 2024-11-07T17:15:47,139 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 5c144e9b474530f0a58afb1f68827ff6 in 564ms, sequenceid=42, compaction requested=false 2024-11-07T17:15:47,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:47,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:47,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-07T17:15:47,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-07T17:15:47,142 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-07T17:15:47,142 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 873 msec 2024-11-07T17:15:47,143 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 878 msec 2024-11-07T17:15:47,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:47,268 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-07T17:15:47,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:15:47,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:47,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:15:47,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:47,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:15:47,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:47,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411077dea312be5e540e3aba16a6e5eb19385_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999747267/Put/seqid=0 2024-11-07T17:15:47,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999807289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999807291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999807292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999807293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999807294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741973_1149 (size=12154) 2024-11-07T17:15:47,308 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:47,314 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411077dea312be5e540e3aba16a6e5eb19385_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411077dea312be5e540e3aba16a6e5eb19385_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:47,316 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/a37f6175294345a1bafd58fcaa91f303, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:47,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/a37f6175294345a1bafd58fcaa91f303 is 175, key is test_row_0/A:col10/1730999747267/Put/seqid=0 2024-11-07T17:15:47,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is 
added to blk_1073741974_1150 (size=30955) 2024-11-07T17:15:47,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-07T17:15:47,372 INFO [Thread-695 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-07T17:15:47,374 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:47,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-07T17:15:47,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-07T17:15:47,378 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:47,378 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:47,379 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:47,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999807394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999807395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999807396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999807396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999807396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-07T17:15:47,531 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-07T17:15:47,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:47,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:47,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:47,532 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:47,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:47,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:47,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999807597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999807598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999807598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999807598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999807598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-07T17:15:47,685 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,686 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-07T17:15:47,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:47,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:47,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:47,686 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:47,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:47,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:47,739 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=60, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/a37f6175294345a1bafd58fcaa91f303 2024-11-07T17:15:47,752 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/3475c91e8084439aa2322fd803459d60 is 50, key is test_row_0/B:col10/1730999747267/Put/seqid=0 2024-11-07T17:15:47,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741975_1151 (size=12001) 2024-11-07T17:15:47,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/3475c91e8084439aa2322fd803459d60 2024-11-07T17:15:47,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/c1dcdc7795bb49f09c65d9cb67d19f13 is 50, key is test_row_0/C:col10/1730999747267/Put/seqid=0 2024-11-07T17:15:47,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741976_1152 (size=12001) 2024-11-07T17:15:47,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=60 (bloomFilter=true), 
to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/c1dcdc7795bb49f09c65d9cb67d19f13 2024-11-07T17:15:47,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/a37f6175294345a1bafd58fcaa91f303 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/a37f6175294345a1bafd58fcaa91f303 2024-11-07T17:15:47,817 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/a37f6175294345a1bafd58fcaa91f303, entries=150, sequenceid=60, filesize=30.2 K 2024-11-07T17:15:47,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/3475c91e8084439aa2322fd803459d60 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/3475c91e8084439aa2322fd803459d60 2024-11-07T17:15:47,826 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/3475c91e8084439aa2322fd803459d60, entries=150, sequenceid=60, filesize=11.7 K 2024-11-07T17:15:47,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/c1dcdc7795bb49f09c65d9cb67d19f13 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/c1dcdc7795bb49f09c65d9cb67d19f13 2024-11-07T17:15:47,837 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/c1dcdc7795bb49f09c65d9cb67d19f13, entries=150, sequenceid=60, filesize=11.7 K 2024-11-07T17:15:47,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 5c144e9b474530f0a58afb1f68827ff6 in 571ms, sequenceid=60, compaction requested=true 2024-11-07T17:15:47,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:47,839 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:47,839 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:A, priority=-2147483648, current under 
compaction store size is 1 2024-11-07T17:15:47,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:47,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:47,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:47,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:47,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-07T17:15:47,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-07T17:15:47,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:47,840 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:47,840 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-07T17:15:47,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:15:47,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:47,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:15:47,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:47,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:15:47,841 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:47,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; 
before=1, new segment=null 2024-11-07T17:15:47,841 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/A is initiating minor compaction (all files) 2024-11-07T17:15:47,841 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/A in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:47,841 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/55c0cb1171174d248753dd1aa0658831, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/326686d5ea164bc79f9fe4fef6b96e24, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/a37f6175294345a1bafd58fcaa91f303] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=90.7 K 2024-11-07T17:15:47,842 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:47,842 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/55c0cb1171174d248753dd1aa0658831, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/326686d5ea164bc79f9fe4fef6b96e24, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/a37f6175294345a1bafd58fcaa91f303] 2024-11-07T17:15:47,842 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55c0cb1171174d248753dd1aa0658831, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1730999746273 2024-11-07T17:15:47,844 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 326686d5ea164bc79f9fe4fef6b96e24, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1730999746315 2024-11-07T17:15:47,844 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:47,844 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/B is initiating minor compaction (all files) 2024-11-07T17:15:47,845 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/B in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:47,845 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/29b518a8bff1485a81b9d38bf8dbf91a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/7f9f288bcb3c4b86b186c6b0e15eed7c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/3475c91e8084439aa2322fd803459d60] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=35.2 K 2024-11-07T17:15:47,845 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting a37f6175294345a1bafd58fcaa91f303, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1730999746652 2024-11-07T17:15:47,847 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 29b518a8bff1485a81b9d38bf8dbf91a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1730999746273 2024-11-07T17:15:47,847 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f9f288bcb3c4b86b186c6b0e15eed7c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1730999746315 2024-11-07T17:15:47,849 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 
3475c91e8084439aa2322fd803459d60, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1730999746652 2024-11-07T17:15:47,866 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:47,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411070b6998433e3f4cf6aac4082cb25ac07e_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999747290/Put/seqid=0 2024-11-07T17:15:47,875 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#B#compaction#134 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:47,875 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/3c1117032ecf458da13e28c90591fcaa is 50, key is test_row_0/B:col10/1730999747267/Put/seqid=0 2024-11-07T17:15:47,876 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411074303b9ec13fd414d85c3eb9375bbc888_5c144e9b474530f0a58afb1f68827ff6 store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:47,883 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411074303b9ec13fd414d85c3eb9375bbc888_5c144e9b474530f0a58afb1f68827ff6, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:47,883 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411074303b9ec13fd414d85c3eb9375bbc888_5c144e9b474530f0a58afb1f68827ff6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:47,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741978_1154 (size=12104) 2024-11-07T17:15:47,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:47,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
as already flushing 2024-11-07T17:15:47,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741977_1153 (size=12154) 2024-11-07T17:15:47,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:47,920 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411070b6998433e3f4cf6aac4082cb25ac07e_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411070b6998433e3f4cf6aac4082cb25ac07e_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:47,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/6a76286bcf1840eeb77d0d7d2571bfcf, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:47,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/6a76286bcf1840eeb77d0d7d2571bfcf is 175, key is test_row_0/A:col10/1730999747290/Put/seqid=0 2024-11-07T17:15:47,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999807927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999807928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741979_1155 (size=4469) 2024-11-07T17:15:47,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999807931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999807932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:47,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999807933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:47,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741980_1156 (size=30955) 2024-11-07T17:15:47,945 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/6a76286bcf1840eeb77d0d7d2571bfcf 2024-11-07T17:15:47,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/c64b7b6a0b7f4952bd624678e4a072aa is 50, key is test_row_0/B:col10/1730999747290/Put/seqid=0 2024-11-07T17:15:47,964 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-07T17:15:47,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-07T17:15:48,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741981_1157 (size=12001) 2024-11-07T17:15:48,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999808034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999808035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999808039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999808039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999808039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999808238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999808239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999808247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999808248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999808249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,311 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/3c1117032ecf458da13e28c90591fcaa as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/3c1117032ecf458da13e28c90591fcaa 2024-11-07T17:15:48,318 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/B of 5c144e9b474530f0a58afb1f68827ff6 into 3c1117032ecf458da13e28c90591fcaa(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
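[Editor's note] The repeated "Over memstore limit=512.0 K" rejections in the entries above come from the region server blocking new mutations once a region's memstore passes its blocking size. The sketch below shows how such a small limit could plausibly arise in a test configuration; the property names are the standard HBase settings, but the concrete values, and the assumption that this particular test tunes them this way, are illustrative only and not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (the production default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Reject new mutations once the memstore reaches flush.size * block.multiplier,
    // i.e. 128 KB * 4 = 512 KB -- matching the "Over memstore limit=512.0 K" entries above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
  }
}

With those (assumed) values the region rejects writes with RegionTooBusyException until the in-flight flush and the compactions logged above bring the memstore back under the limit.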
2024-11-07T17:15:48,318 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:48,319 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/B, priority=13, startTime=1730999747840; duration=0sec 2024-11-07T17:15:48,319 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:48,319 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:B 2024-11-07T17:15:48,319 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:48,322 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:48,322 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/C is initiating minor compaction (all files) 2024-11-07T17:15:48,322 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/C in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:48,323 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9c4a9596311e4314b5fe1921a0fbe3a7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/d98dd9a0c7944694aae18e88129b8804, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/c1dcdc7795bb49f09c65d9cb67d19f13] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=35.2 K 2024-11-07T17:15:48,323 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c4a9596311e4314b5fe1921a0fbe3a7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1730999746273 2024-11-07T17:15:48,324 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting d98dd9a0c7944694aae18e88129b8804, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1730999746315 2024-11-07T17:15:48,324 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting c1dcdc7795bb49f09c65d9cb67d19f13, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1730999746652 2024-11-07T17:15:48,335 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
5c144e9b474530f0a58afb1f68827ff6#C#compaction#136 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:48,336 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/33fc364a51e9486a85d46452273dae8d is 50, key is test_row_0/C:col10/1730999747267/Put/seqid=0 2024-11-07T17:15:48,337 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#A#compaction#133 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:48,340 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/1e81fb63ede24570bee2e66b66e0036d is 175, key is test_row_0/A:col10/1730999747267/Put/seqid=0 2024-11-07T17:15:48,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741982_1158 (size=12104) 2024-11-07T17:15:48,369 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/33fc364a51e9486a85d46452273dae8d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/33fc364a51e9486a85d46452273dae8d 2024-11-07T17:15:48,376 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/C of 5c144e9b474530f0a58afb1f68827ff6 into 33fc364a51e9486a85d46452273dae8d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:48,376 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:48,376 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/C, priority=13, startTime=1730999747840; duration=0sec 2024-11-07T17:15:48,377 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:48,377 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:C 2024-11-07T17:15:48,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741983_1159 (size=31058) 2024-11-07T17:15:48,391 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/1e81fb63ede24570bee2e66b66e0036d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1e81fb63ede24570bee2e66b66e0036d 2024-11-07T17:15:48,400 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/A of 5c144e9b474530f0a58afb1f68827ff6 into 1e81fb63ede24570bee2e66b66e0036d(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:48,400 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:48,400 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/A, priority=13, startTime=1730999747839; duration=0sec 2024-11-07T17:15:48,401 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:48,401 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:A 2024-11-07T17:15:48,404 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/c64b7b6a0b7f4952bd624678e4a072aa 2024-11-07T17:15:48,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/bd4d8b4587714c3bae3ce10262e8a385 is 50, key is test_row_0/C:col10/1730999747290/Put/seqid=0 2024-11-07T17:15:48,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741984_1160 (size=12001) 2024-11-07T17:15:48,420 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/bd4d8b4587714c3bae3ce10262e8a385 2024-11-07T17:15:48,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/6a76286bcf1840eeb77d0d7d2571bfcf as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/6a76286bcf1840eeb77d0d7d2571bfcf 2024-11-07T17:15:48,436 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/6a76286bcf1840eeb77d0d7d2571bfcf, entries=150, sequenceid=79, filesize=30.2 K 2024-11-07T17:15:48,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/c64b7b6a0b7f4952bd624678e4a072aa as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/c64b7b6a0b7f4952bd624678e4a072aa 2024-11-07T17:15:48,444 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/c64b7b6a0b7f4952bd624678e4a072aa, entries=150, sequenceid=79, filesize=11.7 K 2024-11-07T17:15:48,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/bd4d8b4587714c3bae3ce10262e8a385 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/bd4d8b4587714c3bae3ce10262e8a385 2024-11-07T17:15:48,456 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/bd4d8b4587714c3bae3ce10262e8a385, entries=150, sequenceid=79, filesize=11.7 K 2024-11-07T17:15:48,457 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 5c144e9b474530f0a58afb1f68827ff6 in 617ms, sequenceid=79, compaction requested=false 2024-11-07T17:15:48,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:48,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:48,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-07T17:15:48,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-07T17:15:48,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-07T17:15:48,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0800 sec 2024-11-07T17:15:48,463 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.0880 sec 2024-11-07T17:15:48,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-07T17:15:48,480 INFO [Thread-695 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-07T17:15:48,482 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:48,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-07T17:15:48,484 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:48,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-07T17:15:48,485 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:48,485 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:48,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:48,544 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-07T17:15:48,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:15:48,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:48,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:15:48,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-07T17:15:48,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:15:48,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:48,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107604579dd391d4c0ca9cf5f3d3c0fc0e6_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999747926/Put/seqid=0 2024-11-07T17:15:48,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999808562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999808564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999808564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999808566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999808567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741985_1161 (size=12154) 2024-11-07T17:15:48,575 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:48,580 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107604579dd391d4c0ca9cf5f3d3c0fc0e6_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107604579dd391d4c0ca9cf5f3d3c0fc0e6_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:48,582 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/66234dbeb3f54674ae050a531e74b416, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:48,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/66234dbeb3f54674ae050a531e74b416 is 175, key is test_row_0/A:col10/1730999747926/Put/seqid=0 2024-11-07T17:15:48,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-07T17:15:48,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741986_1162 (size=30955) 2024-11-07T17:15:48,637 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-07T17:15:48,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): 
Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:48,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:48,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:48,638 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:48,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:48,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:48,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999808668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999808670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999808671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999808675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999808676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-07T17:15:48,790 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-07T17:15:48,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:48,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:48,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:48,791 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:48,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:48,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:48,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999808873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999808874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999808875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999808885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:48,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999808885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,945 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:48,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-07T17:15:48,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:48,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:48,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:48,946 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:48,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:48,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:48,992 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=101, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/66234dbeb3f54674ae050a531e74b416 2024-11-07T17:15:49,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/089e6f5652e04b2790f74467369739d3 is 50, key is test_row_0/B:col10/1730999747926/Put/seqid=0 2024-11-07T17:15:49,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741987_1163 (size=12001) 2024-11-07T17:15:49,009 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/089e6f5652e04b2790f74467369739d3 2024-11-07T17:15:49,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/f202d7b991514def8aa6ed0291bacce5 is 50, key is test_row_0/C:col10/1730999747926/Put/seqid=0 2024-11-07T17:15:49,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741988_1164 (size=12001) 2024-11-07T17:15:49,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 
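The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (default 4); the 512.0 K limit in this run suggests the test configures a very small flush size. The HBase client normally retries these rejections on its own, so the loop below is only a minimal sketch, assuming an HBase client on the classpath and reusing the table, row, family and qualifier names seen in the log, of the back-off behaviour a writer relies on while the in-progress flush drains the memstore.

  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.RegionTooBusyException;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
        Put put = new Put(Bytes.toBytes("test_row_0"));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        long backoffMs = 100;
        for (int attempt = 0; attempt < 10; attempt++) {
          try {
            // Rejected with RegionTooBusyException while the memstore is over the
            // blocking limit; in practice the HBase client retries this internally,
            // the explicit catch here just makes the back-off visible.
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            Thread.sleep(backoffMs);                 // wait for the flush to drain the memstore
            backoffMs = Math.min(backoffMs * 2, 5_000);
          }
        }
        throw new IOException("region stayed too busy after 10 attempts");
      }
    }
  }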
2024-11-07T17:15:49,098 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,100 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-07T17:15:49,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:49,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:49,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:49,100 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:49,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
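The pid=52 failures above ("Unable to complete flush ... as already flushing") show the master's remote flush procedure being re-dispatched to the region server while a MemStoreFlusher-initiated flush of the same region is still running; each attempt throws IOException from FlushRegionCallable and the dispatcher simply tries again until the in-flight flush finishes, while the pid=51 "Checking to see if procedure is done" lines are the caller polling the parent flush procedure at the master. A minimal sketch, under the assumption that this flush was requested through the Admin API, of the client call that starts such a table flush (the per-region retry loop seen in the log is master-side and automatic):

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class FlushTableExample {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        // Synchronous: returns once the flush of the table's regions has completed.
        admin.flush(TableName.valueOf("TestAcidGuarantees"));
      }
    }
  }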
2024-11-07T17:15:49,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:49,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:49,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999809179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:49,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999809179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:49,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999809187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:49,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999809189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:49,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999809190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,253 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,254 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-07T17:15:49,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:49,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:49,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:49,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:49,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:49,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:49,409 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,410 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-07T17:15:49,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:49,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:49,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:49,410 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:49,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:49,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:49,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/f202d7b991514def8aa6ed0291bacce5 2024-11-07T17:15:49,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/66234dbeb3f54674ae050a531e74b416 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/66234dbeb3f54674ae050a531e74b416 2024-11-07T17:15:49,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/66234dbeb3f54674ae050a531e74b416, entries=150, sequenceid=101, filesize=30.2 K 2024-11-07T17:15:49,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/089e6f5652e04b2790f74467369739d3 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/089e6f5652e04b2790f74467369739d3 2024-11-07T17:15:49,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/089e6f5652e04b2790f74467369739d3, entries=150, 
sequenceid=101, filesize=11.7 K 2024-11-07T17:15:49,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/f202d7b991514def8aa6ed0291bacce5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/f202d7b991514def8aa6ed0291bacce5 2024-11-07T17:15:49,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/f202d7b991514def8aa6ed0291bacce5, entries=150, sequenceid=101, filesize=11.7 K 2024-11-07T17:15:49,491 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 5c144e9b474530f0a58afb1f68827ff6 in 947ms, sequenceid=101, compaction requested=true 2024-11-07T17:15:49,491 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:49,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:15:49,491 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:49,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:49,491 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:49,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:49,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:49,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:49,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:49,493 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:49,493 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/A is initiating minor compaction (all files) 2024-11-07T17:15:49,493 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/A in 
TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:49,493 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1e81fb63ede24570bee2e66b66e0036d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/6a76286bcf1840eeb77d0d7d2571bfcf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/66234dbeb3f54674ae050a531e74b416] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=90.8 K 2024-11-07T17:15:49,493 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:49,493 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1e81fb63ede24570bee2e66b66e0036d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/6a76286bcf1840eeb77d0d7d2571bfcf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/66234dbeb3f54674ae050a531e74b416] 2024-11-07T17:15:49,494 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e81fb63ede24570bee2e66b66e0036d, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1730999746652 2024-11-07T17:15:49,494 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:49,494 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/B is initiating minor compaction (all files) 2024-11-07T17:15:49,494 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a76286bcf1840eeb77d0d7d2571bfcf, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1730999747286 2024-11-07T17:15:49,494 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/B in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
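The ExploringCompactionPolicy lines above report that all three A-store files (92968 bytes in total) were "in ratio", i.e. eligible to be compacted together in a minor compaction. The rule being applied is that no file in the candidate set may be larger than hbase.hstore.compaction.ratio (1.2 by default) times the combined size of the other files in the set. Below is a standalone sketch of that check, not the actual HBase class, using hypothetical per-file byte sizes that sum to the logged total:

  public class FilesInRatioSketch {
    // Returns true when every file is no bigger than ratio * (sum of the other files).
    static boolean filesInRatio(long[] sizes, double ratio) {
      long total = 0;
      for (long s : sizes) {
        total += s;
      }
      for (long s : sizes) {
        if (s > (total - s) * ratio) {
          return false;
        }
      }
      return true;
    }

    public static void main(String[] args) {
      // Hypothetical sizes (~30.3 K, ~30.2 K, ~30.2 K) summing to 92968 bytes as in the log.
      long[] storeFileSizes = { 31014, 30977, 30977 };
      System.out.println(filesInRatio(storeFileSizes, 1.2)); // true -> selection is "in ratio"
    }
  }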
2024-11-07T17:15:49,494 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/3c1117032ecf458da13e28c90591fcaa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/c64b7b6a0b7f4952bd624678e4a072aa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/089e6f5652e04b2790f74467369739d3] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=35.3 K 2024-11-07T17:15:49,495 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66234dbeb3f54674ae050a531e74b416, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1730999747926 2024-11-07T17:15:49,495 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c1117032ecf458da13e28c90591fcaa, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1730999746652 2024-11-07T17:15:49,496 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting c64b7b6a0b7f4952bd624678e4a072aa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1730999747286 2024-11-07T17:15:49,497 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 089e6f5652e04b2790f74467369739d3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1730999747926 2024-11-07T17:15:49,504 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:49,506 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107edcd546f99f3476f86474e9c43a31785_5c144e9b474530f0a58afb1f68827ff6 store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:49,509 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107edcd546f99f3476f86474e9c43a31785_5c144e9b474530f0a58afb1f68827ff6, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:49,510 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107edcd546f99f3476f86474e9c43a31785_5c144e9b474530f0a58afb1f68827ff6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:49,518 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#B#compaction#142 average throughput is 6.55 
MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:49,519 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/b0d1964d25bb432984ee16ca95f0cb13 is 50, key is test_row_0/B:col10/1730999747926/Put/seqid=0 2024-11-07T17:15:49,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741990_1166 (size=12207) 2024-11-07T17:15:49,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741989_1165 (size=4469) 2024-11-07T17:15:49,550 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/b0d1964d25bb432984ee16ca95f0cb13 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/b0d1964d25bb432984ee16ca95f0cb13 2024-11-07T17:15:49,560 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/B of 5c144e9b474530f0a58afb1f68827ff6 into b0d1964d25bb432984ee16ca95f0cb13(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:49,560 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:49,560 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/B, priority=13, startTime=1730999749491; duration=0sec 2024-11-07T17:15:49,560 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:49,561 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:B 2024-11-07T17:15:49,561 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:49,562 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:49,562 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/C is initiating minor compaction (all files) 2024-11-07T17:15:49,562 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/C in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:49,562 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/33fc364a51e9486a85d46452273dae8d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/bd4d8b4587714c3bae3ce10262e8a385, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/f202d7b991514def8aa6ed0291bacce5] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=35.3 K 2024-11-07T17:15:49,564 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,564 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 33fc364a51e9486a85d46452273dae8d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1730999746652 2024-11-07T17:15:49,565 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-07T17:15:49,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:49,565 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting bd4d8b4587714c3bae3ce10262e8a385, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1730999747286 2024-11-07T17:15:49,565 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-07T17:15:49,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:15:49,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:49,565 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting f202d7b991514def8aa6ed0291bacce5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1730999747926 2024-11-07T17:15:49,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:15:49,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:49,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): 
FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:15:49,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:49,582 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#C#compaction#143 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:49,583 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/a0665bc4473c4d8594f19dafd2232ed1 is 50, key is test_row_0/C:col10/1730999747926/Put/seqid=0 2024-11-07T17:15:49,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411075d4a88b5067a48adb603691e3ce64b53_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999748562/Put/seqid=0 2024-11-07T17:15:49,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-07T17:15:49,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741992_1168 (size=12154) 2024-11-07T17:15:49,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741991_1167 (size=12207) 2024-11-07T17:15:49,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:49,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:49,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:49,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999809705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:49,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999809706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:49,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999809708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:49,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999809710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:49,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999809711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:49,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999809813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:49,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999809813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:49,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999809814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:49,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999809816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:49,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999809817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:49,952 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#A#compaction#141 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:49,953 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/3b7263a622264f539d49a428b69042fa is 175, key is test_row_0/A:col10/1730999747926/Put/seqid=0 2024-11-07T17:15:49,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741993_1169 (size=31161) 2024-11-07T17:15:50,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:50,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:50,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999810016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:50,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:50,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999810018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:50,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:50,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999810021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:50,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:50,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999810025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:50,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:50,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999810025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:50,033 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411075d4a88b5067a48adb603691e3ce64b53_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075d4a88b5067a48adb603691e3ce64b53_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:50,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/71fc3a4d295b4db785152eb71dcb91ad, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:50,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/71fc3a4d295b4db785152eb71dcb91ad is 175, key is test_row_0/A:col10/1730999748562/Put/seqid=0 2024-11-07T17:15:50,044 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/a0665bc4473c4d8594f19dafd2232ed1 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a0665bc4473c4d8594f19dafd2232ed1 2024-11-07T17:15:50,051 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/C of 5c144e9b474530f0a58afb1f68827ff6 into a0665bc4473c4d8594f19dafd2232ed1(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:50,051 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:50,051 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/C, priority=13, startTime=1730999749492; duration=0sec 2024-11-07T17:15:50,051 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:50,051 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:C 2024-11-07T17:15:50,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741994_1170 (size=30955) 2024-11-07T17:15:50,102 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T17:15:50,103 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33744, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T17:15:50,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:50,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999810322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:50,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:50,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999810324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:50,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:50,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999810329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:50,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:50,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999810326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:50,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:50,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999810330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:50,402 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/3b7263a622264f539d49a428b69042fa as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/3b7263a622264f539d49a428b69042fa 2024-11-07T17:15:50,424 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/A of 5c144e9b474530f0a58afb1f68827ff6 into 3b7263a622264f539d49a428b69042fa(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:50,424 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:50,424 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/A, priority=13, startTime=1730999749491; duration=0sec 2024-11-07T17:15:50,424 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:50,424 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:A 2024-11-07T17:15:50,483 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/71fc3a4d295b4db785152eb71dcb91ad 2024-11-07T17:15:50,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/b268afec57f041b3bd1af9315a78cdcf is 50, key is test_row_0/B:col10/1730999748562/Put/seqid=0 2024-11-07T17:15:50,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741995_1171 (size=12001) 2024-11-07T17:15:50,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-07T17:15:50,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:50,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999810826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:50,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:50,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999810828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:50,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:50,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999810833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:50,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:50,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999810833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:50,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:50,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999810836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:50,951 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/b268afec57f041b3bd1af9315a78cdcf 2024-11-07T17:15:50,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/beca7f2e646d417fa62fd4be4502a038 is 50, key is test_row_0/C:col10/1730999748562/Put/seqid=0 2024-11-07T17:15:51,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741996_1172 (size=12001) 2024-11-07T17:15:51,018 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/beca7f2e646d417fa62fd4be4502a038 2024-11-07T17:15:51,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/71fc3a4d295b4db785152eb71dcb91ad as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/71fc3a4d295b4db785152eb71dcb91ad 2024-11-07T17:15:51,036 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/71fc3a4d295b4db785152eb71dcb91ad, entries=150, sequenceid=119, filesize=30.2 K 2024-11-07T17:15:51,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/b268afec57f041b3bd1af9315a78cdcf as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/b268afec57f041b3bd1af9315a78cdcf 2024-11-07T17:15:51,046 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/b268afec57f041b3bd1af9315a78cdcf, entries=150, sequenceid=119, filesize=11.7 K 2024-11-07T17:15:51,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/beca7f2e646d417fa62fd4be4502a038 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/beca7f2e646d417fa62fd4be4502a038 2024-11-07T17:15:51,057 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/beca7f2e646d417fa62fd4be4502a038, entries=150, sequenceid=119, filesize=11.7 K 2024-11-07T17:15:51,059 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 5c144e9b474530f0a58afb1f68827ff6 in 1494ms, sequenceid=119, compaction requested=false 2024-11-07T17:15:51,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:51,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:51,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-07T17:15:51,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-07T17:15:51,063 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-07T17:15:51,063 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5760 sec 2024-11-07T17:15:51,065 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 2.5820 sec 2024-11-07T17:15:51,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:51,830 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-07T17:15:51,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:15:51,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:51,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:15:51,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:51,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:15:51,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:51,840 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107b787509e96e9400ca8c1c7fe7dab06ab_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999749710/Put/seqid=0 2024-11-07T17:15:51,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:51,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:51,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999811849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:51,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999811850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:51,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:51,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999811851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:51,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:51,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999811852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:51,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741997_1173 (size=14794) 2024-11-07T17:15:51,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:51,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999811852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:51,859 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:51,866 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107b787509e96e9400ca8c1c7fe7dab06ab_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107b787509e96e9400ca8c1c7fe7dab06ab_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:51,868 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/41749e908c3f40c0a1b939f382998b90, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:51,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/41749e908c3f40c0a1b939f382998b90 is 175, key is test_row_0/A:col10/1730999749710/Put/seqid=0 2024-11-07T17:15:51,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741998_1174 (size=39749) 2024-11-07T17:15:51,879 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=141, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/41749e908c3f40c0a1b939f382998b90 2024-11-07T17:15:51,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/6fad5c4707f84cdc8463a8563d524d5d is 50, key is test_row_0/B:col10/1730999749710/Put/seqid=0 2024-11-07T17:15:51,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741999_1175 
(size=12151) 2024-11-07T17:15:51,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/6fad5c4707f84cdc8463a8563d524d5d 2024-11-07T17:15:51,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/f39115a3a32a4bcbaabd123ef5f5691e is 50, key is test_row_0/C:col10/1730999749710/Put/seqid=0 2024-11-07T17:15:51,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742000_1176 (size=12151) 2024-11-07T17:15:51,955 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/f39115a3a32a4bcbaabd123ef5f5691e 2024-11-07T17:15:51,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:51,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999811957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:51,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:51,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999811957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:51,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:51,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999811958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:51,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:51,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999811959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:51,962 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/41749e908c3f40c0a1b939f382998b90 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/41749e908c3f40c0a1b939f382998b90 2024-11-07T17:15:51,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:51,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999811959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:51,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/41749e908c3f40c0a1b939f382998b90, entries=200, sequenceid=141, filesize=38.8 K 2024-11-07T17:15:51,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/6fad5c4707f84cdc8463a8563d524d5d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6fad5c4707f84cdc8463a8563d524d5d 2024-11-07T17:15:51,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6fad5c4707f84cdc8463a8563d524d5d, entries=150, sequenceid=141, filesize=11.9 K 2024-11-07T17:15:51,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/f39115a3a32a4bcbaabd123ef5f5691e as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/f39115a3a32a4bcbaabd123ef5f5691e 2024-11-07T17:15:51,993 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/f39115a3a32a4bcbaabd123ef5f5691e, entries=150, sequenceid=141, filesize=11.9 K 2024-11-07T17:15:51,995 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 5c144e9b474530f0a58afb1f68827ff6 in 165ms, sequenceid=141, compaction requested=true 2024-11-07T17:15:51,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:51,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:15:51,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:51,996 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:51,996 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:51,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:51,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:51,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:51,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:51,998 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:51,998 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/B is initiating minor compaction (all files) 2024-11-07T17:15:51,998 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/B in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
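By this point each of the A, B and C stores holds three flushed HFiles, which meets the usual minimum of three files for a minor compaction, so the MemStoreFlusher queues compaction requests and ExploringCompactionPolicy selects all three B files (36,359 bytes total, per the log). Selection is governed by a size-ratio rule: a candidate file stays in the set only if it is not much larger than the other candidates combined. The sketch below is a simplified, assumed illustration of that rule using the default hbase.hstore.compaction.ratio of 1.2 and file sizes reconstructed from the log; it is not the actual ExploringCompactionPolicy code.

import java.util.List;

public class CompactionRatioSketch {
    // Default hbase.hstore.compaction.ratio; an assumption for this sketch.
    static final double RATIO = 1.2;

    // A candidate "fits" if it is no larger than RATIO times the sum of the
    // other candidates -- a simplified form of the size-ratio test used by
    // HBase's ratio-based compaction selection.
    static boolean fitsRatio(List<Long> sizes, int index) {
        long others = 0;
        for (int i = 0; i < sizes.size(); i++) {
            if (i != index) {
                others += sizes.get(i);
            }
        }
        return sizes.get(index) <= RATIO * others;
    }

    public static void main(String[] args) {
        // Approximate sizes of the three B-store files, reconstructed from the
        // log (they total the logged 36,359 bytes).
        List<Long> sizes = List.of(12_207L, 12_001L, 12_151L);
        for (int i = 0; i < sizes.size(); i++) {
            System.out.printf("file %d (%d bytes) fits ratio: %b%n", i, sizes.get(i), fitsRatio(sizes, i));
        }
    }
}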
2024-11-07T17:15:51,998 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/b0d1964d25bb432984ee16ca95f0cb13, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/b268afec57f041b3bd1af9315a78cdcf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6fad5c4707f84cdc8463a8563d524d5d] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=35.5 K 2024-11-07T17:15:51,998 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:51,998 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/A is initiating minor compaction (all files) 2024-11-07T17:15:51,998 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/A in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:51,998 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting b0d1964d25bb432984ee16ca95f0cb13, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1730999747926 2024-11-07T17:15:51,999 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/3b7263a622264f539d49a428b69042fa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/71fc3a4d295b4db785152eb71dcb91ad, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/41749e908c3f40c0a1b939f382998b90] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=99.5 K 2024-11-07T17:15:51,999 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:51,999 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/3b7263a622264f539d49a428b69042fa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/71fc3a4d295b4db785152eb71dcb91ad, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/41749e908c3f40c0a1b939f382998b90] 2024-11-07T17:15:51,999 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting b268afec57f041b3bd1af9315a78cdcf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1730999748562 2024-11-07T17:15:51,999 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b7263a622264f539d49a428b69042fa, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1730999747926 2024-11-07T17:15:51,999 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fad5c4707f84cdc8463a8563d524d5d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1730999749701 2024-11-07T17:15:52,000 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71fc3a4d295b4db785152eb71dcb91ad, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1730999748562 2024-11-07T17:15:52,001 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41749e908c3f40c0a1b939f382998b90, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1730999749701 2024-11-07T17:15:52,013 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#B#compaction#150 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:52,013 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/1947152e3d314ce88947053f34e54cf9 is 50, key is test_row_0/B:col10/1730999749710/Put/seqid=0 2024-11-07T17:15:52,023 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:52,051 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107649f473e599d4cf7b248fbbaee048321_5c144e9b474530f0a58afb1f68827ff6 store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:52,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742001_1177 (size=12459) 2024-11-07T17:15:52,053 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107649f473e599d4cf7b248fbbaee048321_5c144e9b474530f0a58afb1f68827ff6, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:52,053 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107649f473e599d4cf7b248fbbaee048321_5c144e9b474530f0a58afb1f68827ff6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:52,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742002_1178 (size=4469) 2024-11-07T17:15:52,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:52,162 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-07T17:15:52,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:15:52,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:52,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:15:52,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:52,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:15:52,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:52,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411078f8382cd7d054c73b4ecfd10bdfb1f3e_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999752162/Put/seqid=0 2024-11-07T17:15:52,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999812197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999812204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999812197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999812205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999812206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742003_1179 (size=12304) 2024-11-07T17:15:52,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999812311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999812311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999812311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999812315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999812316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,460 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/1947152e3d314ce88947053f34e54cf9 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/1947152e3d314ce88947053f34e54cf9 2024-11-07T17:15:52,466 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/B of 5c144e9b474530f0a58afb1f68827ff6 into 1947152e3d314ce88947053f34e54cf9(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
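The RegionTooBusyException entries above are the region server rejecting Mutate RPCs while the region's memstore sits over its 512.0 K blocking limit; the exception reaches the client as an ordinary IOException subclass and is normally handled by backing off and retrying. The sketch below is a minimal, illustrative writer loop (it is not the test's own code); the table, row, family, and qualifier names simply echo values visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100L;                 // initial pause before retrying
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);                // fails with RegionTooBusyException while the memstore is over the limit
                    break;                         // accepted once flushes/compactions free up space
                } catch (RegionTooBusyException busy) {
                    Thread.sleep(backoffMs);       // back off instead of failing the writer
                    backoffMs *= 2;
                }
            }
        }
    }
}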
2024-11-07T17:15:52,466 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:52,466 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/B, priority=13, startTime=1730999751996; duration=0sec 2024-11-07T17:15:52,466 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:52,466 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:B 2024-11-07T17:15:52,466 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:52,468 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:52,468 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/C is initiating minor compaction (all files) 2024-11-07T17:15:52,468 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/C in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:52,468 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a0665bc4473c4d8594f19dafd2232ed1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/beca7f2e646d417fa62fd4be4502a038, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/f39115a3a32a4bcbaabd123ef5f5691e] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=35.5 K 2024-11-07T17:15:52,469 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting a0665bc4473c4d8594f19dafd2232ed1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1730999747926 2024-11-07T17:15:52,469 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting beca7f2e646d417fa62fd4be4502a038, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1730999748562 2024-11-07T17:15:52,470 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting f39115a3a32a4bcbaabd123ef5f5691e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1730999749701 2024-11-07T17:15:52,490 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
5c144e9b474530f0a58afb1f68827ff6#C#compaction#153 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:52,491 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/79aab6ed2700480cb690ba291058772a is 50, key is test_row_0/C:col10/1730999749710/Put/seqid=0 2024-11-07T17:15:52,502 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-07T17:15:52,502 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-07T17:15:52,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742004_1180 (size=12459) 2024-11-07T17:15:52,520 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#A#compaction#151 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:52,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999812517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999812518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,521 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/0b55e51d194e4e379db530c94be43be4 is 175, key is test_row_0/A:col10/1730999749710/Put/seqid=0 2024-11-07T17:15:52,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999812518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999812519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999812519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742005_1181 (size=31413) 2024-11-07T17:15:52,551 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/0b55e51d194e4e379db530c94be43be4 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/0b55e51d194e4e379db530c94be43be4 2024-11-07T17:15:52,560 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/A of 5c144e9b474530f0a58afb1f68827ff6 into 0b55e51d194e4e379db530c94be43be4(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
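The 512.0 K figure in these warnings is the region's blocking memstore size, which HRegion.checkResources computes as the region memstore flush size multiplied by the block multiplier; the TestAcidGuarantees setup evidently shrinks it so that writers hit the limit quickly. The snippet below only illustrates that arithmetic with assumed values (128 KB x 4 = 512 K); the actual numbers used by the test are not shown in this excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BlockingMemstoreLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values for illustration: a 128 KB flush size with a multiplier of 4
        // yields the 512 K blocking limit reported in the log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes"); // 524288 bytes = 512.0 K
    }
}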
2024-11-07T17:15:52,560 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:52,560 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/A, priority=13, startTime=1730999751996; duration=0sec 2024-11-07T17:15:52,560 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:52,560 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:A 2024-11-07T17:15:52,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-07T17:15:52,591 INFO [Thread-695 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-07T17:15:52,592 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:52,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-07T17:15:52,595 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:52,597 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:52,597 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:52,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-07T17:15:52,643 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:52,654 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411078f8382cd7d054c73b4ecfd10bdfb1f3e_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078f8382cd7d054c73b4ecfd10bdfb1f3e_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:52,656 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/beddcab2bb684931ab571793a4e22189, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:52,657 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/beddcab2bb684931ab571793a4e22189 is 175, key is test_row_0/A:col10/1730999752162/Put/seqid=0 2024-11-07T17:15:52,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742006_1182 (size=31105) 2024-11-07T17:15:52,666 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=161, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/beddcab2bb684931ab571793a4e22189 2024-11-07T17:15:52,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/6a56629c5ad74dd68e786830ca5d2167 is 50, key is test_row_0/B:col10/1730999752162/Put/seqid=0 2024-11-07T17:15:52,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-07T17:15:52,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742007_1183 (size=12151) 2024-11-07T17:15:52,727 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/6a56629c5ad74dd68e786830ca5d2167 2024-11-07T17:15:52,749 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,750 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-07T17:15:52,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:52,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:52,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
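The entries above show the write-pressure flush (MemStoreFlusher.0) still finishing while the master's FlushTableProcedure (pid=53, subprocedure pid=54) dispatches a FlushRegionCallable to the same region; the callable declines because the region is already flushing, and the ERROR that follows is that refusal being reported back to the master, which later re-dispatches the subprocedure. The earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees" line suggests the harness drives these flushes through the Admin API; a minimal sketch of such a call is shown below (connection setup assumed as in the earlier example).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Submits a FlushTableProcedure on the master and blocks until it completes,
            // matching the "Checking to see if procedure is done pid=53" polling in the log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}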
2024-11-07T17:15:52,750 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:52,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:52,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/d550d8acdb4b49c7b9db1928dcdfa00a is 50, key is test_row_0/C:col10/1730999752162/Put/seqid=0 2024-11-07T17:15:52,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:52,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742008_1184 (size=12151) 2024-11-07T17:15:52,814 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/d550d8acdb4b49c7b9db1928dcdfa00a 2024-11-07T17:15:52,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/beddcab2bb684931ab571793a4e22189 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/beddcab2bb684931ab571793a4e22189 2024-11-07T17:15:52,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999812822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999812823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999812824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999812825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:52,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999812825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,829 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/beddcab2bb684931ab571793a4e22189, entries=150, sequenceid=161, filesize=30.4 K 2024-11-07T17:15:52,831 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/6a56629c5ad74dd68e786830ca5d2167 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6a56629c5ad74dd68e786830ca5d2167 2024-11-07T17:15:52,843 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6a56629c5ad74dd68e786830ca5d2167, entries=150, sequenceid=161, filesize=11.9 K 2024-11-07T17:15:52,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/d550d8acdb4b49c7b9db1928dcdfa00a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/d550d8acdb4b49c7b9db1928dcdfa00a 2024-11-07T17:15:52,855 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/d550d8acdb4b49c7b9db1928dcdfa00a, entries=150, sequenceid=161, filesize=11.9 K 2024-11-07T17:15:52,857 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 5c144e9b474530f0a58afb1f68827ff6 in 695ms, sequenceid=161, compaction requested=false 2024-11-07T17:15:52,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:52,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-07T17:15:52,903 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:52,904 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-07T17:15:52,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:52,904 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-07T17:15:52,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:15:52,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:52,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:15:52,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:52,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:15:52,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:52,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107a6a1cbb17c4343859c9127e933a47d86_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999752204/Put/seqid=0 2024-11-07T17:15:52,946 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/79aab6ed2700480cb690ba291058772a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/79aab6ed2700480cb690ba291058772a 2024-11-07T17:15:52,955 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/C of 5c144e9b474530f0a58afb1f68827ff6 into 79aab6ed2700480cb690ba291058772a(size=12.2 K), total size for store is 24.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:52,956 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:52,956 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/C, priority=13, startTime=1730999751997; duration=0sec 2024-11-07T17:15:52,956 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:52,956 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:C 2024-11-07T17:15:52,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742009_1185 (size=12304) 2024-11-07T17:15:53,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-07T17:15:53,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:53,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:53,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999813353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999813351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999813354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999813356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999813356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:53,385 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107a6a1cbb17c4343859c9127e933a47d86_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107a6a1cbb17c4343859c9127e933a47d86_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:53,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/de2116fd2f6b447a8b88794bdc5f5d2d, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:53,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/de2116fd2f6b447a8b88794bdc5f5d2d is 175, key is test_row_0/A:col10/1730999752204/Put/seqid=0 2024-11-07T17:15:53,408 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742010_1186 (size=31105) 2024-11-07T17:15:53,410 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=179, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/de2116fd2f6b447a8b88794bdc5f5d2d 2024-11-07T17:15:53,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/aef67027dbd54904a211c1af8ae6ab43 is 50, key is test_row_0/B:col10/1730999752204/Put/seqid=0 2024-11-07T17:15:53,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999813458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999813458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999813459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999813460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999813460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742011_1187 (size=12151) 2024-11-07T17:15:53,521 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/aef67027dbd54904a211c1af8ae6ab43 2024-11-07T17:15:53,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/bff86eb3eccf493ab798522fe1d1a174 is 50, key is test_row_0/C:col10/1730999752204/Put/seqid=0 2024-11-07T17:15:53,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742012_1188 (size=12151) 2024-11-07T17:15:53,574 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/bff86eb3eccf493ab798522fe1d1a174 2024-11-07T17:15:53,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/de2116fd2f6b447a8b88794bdc5f5d2d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/de2116fd2f6b447a8b88794bdc5f5d2d 2024-11-07T17:15:53,596 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/de2116fd2f6b447a8b88794bdc5f5d2d, entries=150, sequenceid=179, filesize=30.4 K 2024-11-07T17:15:53,599 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/aef67027dbd54904a211c1af8ae6ab43 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/aef67027dbd54904a211c1af8ae6ab43 2024-11-07T17:15:53,605 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/aef67027dbd54904a211c1af8ae6ab43, entries=150, sequenceid=179, filesize=11.9 K 2024-11-07T17:15:53,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/bff86eb3eccf493ab798522fe1d1a174 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/bff86eb3eccf493ab798522fe1d1a174 2024-11-07T17:15:53,612 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/bff86eb3eccf493ab798522fe1d1a174, entries=150, sequenceid=179, filesize=11.9 K 2024-11-07T17:15:53,613 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 5c144e9b474530f0a58afb1f68827ff6 in 709ms, sequenceid=179, compaction requested=true 2024-11-07T17:15:53,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:53,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:53,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-07T17:15:53,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-07T17:15:53,616 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-07T17:15:53,617 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0180 sec 2024-11-07T17:15:53,618 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.0250 sec 2024-11-07T17:15:53,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:53,663 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-07T17:15:53,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:15:53,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:53,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:15:53,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:53,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:15:53,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:53,677 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411070bc80e8a6d1a41a98e6ea3a3bd089e9e_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999753355/Put/seqid=0 2024-11-07T17:15:53,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999813678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999813679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999813680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999813680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999813681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-07T17:15:53,705 INFO [Thread-695 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-07T17:15:53,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742013_1189 (size=14794) 2024-11-07T17:15:53,708 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:53,708 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:53,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-07T17:15:53,710 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:53,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-07T17:15:53,711 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:53,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:53,714 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411070bc80e8a6d1a41a98e6ea3a3bd089e9e_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411070bc80e8a6d1a41a98e6ea3a3bd089e9e_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:53,715 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/e857e577f58041929e698f5118e50a6a, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:53,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/e857e577f58041929e698f5118e50a6a is 175, key is test_row_0/A:col10/1730999753355/Put/seqid=0 2024-11-07T17:15:53,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742014_1190 (size=39749) 2024-11-07T17:15:53,785 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=202, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/e857e577f58041929e698f5118e50a6a 2024-11-07T17:15:53,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999813784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999813784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999813784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999813785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999813785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,806 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/5b057b6b7d1d47728a33b9eecb992426 is 50, key is test_row_0/B:col10/1730999753355/Put/seqid=0 2024-11-07T17:15:53,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-07T17:15:53,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742015_1191 (size=12151) 2024-11-07T17:15:53,863 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T17:15:53,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:53,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:53,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:53,864 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:53,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:53,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:53,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999813990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999813991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999813991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999813991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:53,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:53,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999813993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-07T17:15:54,017 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T17:15:54,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:54,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:54,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:54,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:54,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:54,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:54,170 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T17:15:54,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:54,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:54,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:54,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:54,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:54,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:54,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/5b057b6b7d1d47728a33b9eecb992426 2024-11-07T17:15:54,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/9cb530ea73ca45598890b45c54c7c69f is 50, key is test_row_0/C:col10/1730999753355/Put/seqid=0 2024-11-07T17:15:54,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742016_1192 (size=12151) 2024-11-07T17:15:54,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:54,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999814300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:54,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999814300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:54,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:54,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999814300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999814300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:54,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999814300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-07T17:15:54,325 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,325 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T17:15:54,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:54,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:54,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:54,326 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:54,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:54,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:54,478 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T17:15:54,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:54,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:54,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:54,479 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:54,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:54,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:54,631 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T17:15:54,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:54,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:54,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:54,632 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:54,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:54,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:54,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/9cb530ea73ca45598890b45c54c7c69f 2024-11-07T17:15:54,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/e857e577f58041929e698f5118e50a6a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/e857e577f58041929e698f5118e50a6a 2024-11-07T17:15:54,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/e857e577f58041929e698f5118e50a6a, entries=200, sequenceid=202, filesize=38.8 K 2024-11-07T17:15:54,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/5b057b6b7d1d47728a33b9eecb992426 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/5b057b6b7d1d47728a33b9eecb992426 2024-11-07T17:15:54,702 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/5b057b6b7d1d47728a33b9eecb992426, entries=150, 
sequenceid=202, filesize=11.9 K 2024-11-07T17:15:54,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/9cb530ea73ca45598890b45c54c7c69f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9cb530ea73ca45598890b45c54c7c69f 2024-11-07T17:15:54,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9cb530ea73ca45598890b45c54c7c69f, entries=150, sequenceid=202, filesize=11.9 K 2024-11-07T17:15:54,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 5c144e9b474530f0a58afb1f68827ff6 in 1046ms, sequenceid=202, compaction requested=true 2024-11-07T17:15:54,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:54,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:15:54,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:54,709 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:54,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:54,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:54,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:54,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-07T17:15:54,709 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:54,711 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133372 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:54,711 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/A is initiating minor compaction (all files) 2024-11-07T17:15:54,711 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/A in 
TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:54,711 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/0b55e51d194e4e379db530c94be43be4, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/beddcab2bb684931ab571793a4e22189, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/de2116fd2f6b447a8b88794bdc5f5d2d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/e857e577f58041929e698f5118e50a6a] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=130.2 K 2024-11-07T17:15:54,711 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:54,711 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:54,711 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/B is initiating minor compaction (all files) 2024-11-07T17:15:54,711 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/0b55e51d194e4e379db530c94be43be4, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/beddcab2bb684931ab571793a4e22189, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/de2116fd2f6b447a8b88794bdc5f5d2d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/e857e577f58041929e698f5118e50a6a] 2024-11-07T17:15:54,711 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/B in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:54,711 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/1947152e3d314ce88947053f34e54cf9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6a56629c5ad74dd68e786830ca5d2167, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/aef67027dbd54904a211c1af8ae6ab43, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/5b057b6b7d1d47728a33b9eecb992426] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=47.8 K 2024-11-07T17:15:54,712 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b55e51d194e4e379db530c94be43be4, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1730999749701 2024-11-07T17:15:54,712 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1947152e3d314ce88947053f34e54cf9, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1730999749701 2024-11-07T17:15:54,712 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting beddcab2bb684931ab571793a4e22189, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1730999751848 2024-11-07T17:15:54,712 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a56629c5ad74dd68e786830ca5d2167, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1730999751848 2024-11-07T17:15:54,713 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting de2116fd2f6b447a8b88794bdc5f5d2d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1730999752189 2024-11-07T17:15:54,713 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting aef67027dbd54904a211c1af8ae6ab43, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1730999752189 2024-11-07T17:15:54,713 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting e857e577f58041929e698f5118e50a6a, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1730999753344 2024-11-07T17:15:54,713 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b057b6b7d1d47728a33b9eecb992426, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1730999753355 2024-11-07T17:15:54,733 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:54,734 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#B#compaction#162 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:54,736 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107aa63e151ebe4458588583f8f11183fe7_5c144e9b474530f0a58afb1f68827ff6 store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:54,736 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/bcb0e11ab039489db35ad8ab3f12b429 is 50, key is test_row_0/B:col10/1730999753355/Put/seqid=0 2024-11-07T17:15:54,750 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107aa63e151ebe4458588583f8f11183fe7_5c144e9b474530f0a58afb1f68827ff6, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:54,750 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107aa63e151ebe4458588583f8f11183fe7_5c144e9b474530f0a58afb1f68827ff6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:54,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742017_1193 (size=12595) 2024-11-07T17:15:54,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742018_1194 (size=4469) 2024-11-07T17:15:54,774 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/bcb0e11ab039489db35ad8ab3f12b429 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/bcb0e11ab039489db35ad8ab3f12b429 2024-11-07T17:15:54,775 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#A#compaction#163 average throughput is 0.58 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:54,775 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/3993fac3a9ce4c6588724044de11f84e is 175, key is test_row_0/A:col10/1730999753355/Put/seqid=0 2024-11-07T17:15:54,782 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/B of 5c144e9b474530f0a58afb1f68827ff6 into bcb0e11ab039489db35ad8ab3f12b429(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:54,782 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:54,782 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/B, priority=12, startTime=1730999754709; duration=0sec 2024-11-07T17:15:54,782 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:54,782 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:B 2024-11-07T17:15:54,782 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:15:54,784 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:15:54,784 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/C is initiating minor compaction (all files) 2024-11-07T17:15:54,784 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/C in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:54,784 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,784 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/79aab6ed2700480cb690ba291058772a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/d550d8acdb4b49c7b9db1928dcdfa00a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/bff86eb3eccf493ab798522fe1d1a174, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9cb530ea73ca45598890b45c54c7c69f] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=47.8 K 2024-11-07T17:15:54,785 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79aab6ed2700480cb690ba291058772a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1730999749701 2024-11-07T17:15:54,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T17:15:54,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:54,785 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting d550d8acdb4b49c7b9db1928dcdfa00a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1730999751848 2024-11-07T17:15:54,785 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T17:15:54,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:15:54,786 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting bff86eb3eccf493ab798522fe1d1a174, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1730999752189 2024-11-07T17:15:54,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:54,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:15:54,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:54,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:15:54,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:54,786 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cb530ea73ca45598890b45c54c7c69f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1730999753355 2024-11-07T17:15:54,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742019_1195 (size=31549) 2024-11-07T17:15:54,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:54,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:54,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-07T17:15:54,816 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#C#compaction#164 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:54,816 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/5e3014ad02e64b54b8485f97b575a178 is 50, key is test_row_0/C:col10/1730999753355/Put/seqid=0 2024-11-07T17:15:54,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110721dd706ba8c345f49ecbf0f4c89720fd_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999753679/Put/seqid=0 2024-11-07T17:15:54,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:54,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999814848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:54,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999814848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:54,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999814848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:54,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999814849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:54,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999814849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742020_1196 (size=12304) 2024-11-07T17:15:54,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742021_1197 (size=12595) 2024-11-07T17:15:54,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:54,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999814954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:54,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999814955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:54,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999814955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:54,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999814956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:54,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:54,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999814959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:55,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:55,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999815157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999815159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:55,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999815159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:55,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999815159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:55,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999815164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,231 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/3993fac3a9ce4c6588724044de11f84e as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/3993fac3a9ce4c6588724044de11f84e 2024-11-07T17:15:55,238 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/A of 5c144e9b474530f0a58afb1f68827ff6 into 3993fac3a9ce4c6588724044de11f84e(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
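The long run of RegionTooBusyException warnings above (and continuing below) shows writers being turned away while the region's memstore is over its blocking limit and the flush for pid=56 is still draining it. A minimal client-side sketch of retrying such a rejected put with backoff follows; it assumes client retries are configured low enough for the exception to reach the caller (the stock client otherwise retries internally), and the row, family, and value are placeholders rather than the test's data.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch only; not part of TestAcidGuarantees.
public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e; // give up; the region is still over its memstore limit
          }
          Thread.sleep(backoffMs); // back off while the flush drains the memstore
          backoffMs = Math.min(backoffMs * 2, 2000);
        }
      }
    }
  }
}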
2024-11-07T17:15:55,238 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:55,238 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/A, priority=12, startTime=1730999754709; duration=0sec 2024-11-07T17:15:55,238 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:55,238 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:A 2024-11-07T17:15:55,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:55,266 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110721dd706ba8c345f49ecbf0f4c89720fd_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110721dd706ba8c345f49ecbf0f4c89720fd_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:55,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/269eab1018574de690af2b3f231d6e0c, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:55,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/269eab1018574de690af2b3f231d6e0c is 175, key is test_row_0/A:col10/1730999753679/Put/seqid=0 2024-11-07T17:15:55,275 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/5e3014ad02e64b54b8485f97b575a178 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/5e3014ad02e64b54b8485f97b575a178 2024-11-07T17:15:55,286 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/C of 5c144e9b474530f0a58afb1f68827ff6 into 5e3014ad02e64b54b8485f97b575a178(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:55,287 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:55,287 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/C, priority=12, startTime=1730999754709; duration=0sec 2024-11-07T17:15:55,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742022_1198 (size=31105) 2024-11-07T17:15:55,287 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:55,287 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:C 2024-11-07T17:15:55,288 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=217, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/269eab1018574de690af2b3f231d6e0c 2024-11-07T17:15:55,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/9635665c0e8441a59305d7e7246e2e80 is 50, key is test_row_0/B:col10/1730999753679/Put/seqid=0 2024-11-07T17:15:55,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742023_1199 (size=12151) 2024-11-07T17:15:55,332 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/9635665c0e8441a59305d7e7246e2e80 2024-11-07T17:15:55,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/ae131beb3d8a4ba78e4869567c3fada4 is 50, key is test_row_0/C:col10/1730999753679/Put/seqid=0 2024-11-07T17:15:55,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742024_1200 (size=12151) 2024-11-07T17:15:55,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:55,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999815461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:55,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999815461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:55,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999815462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:55,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999815464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:55,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999815471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,775 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/ae131beb3d8a4ba78e4869567c3fada4 2024-11-07T17:15:55,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/269eab1018574de690af2b3f231d6e0c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/269eab1018574de690af2b3f231d6e0c 2024-11-07T17:15:55,787 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/269eab1018574de690af2b3f231d6e0c, entries=150, sequenceid=217, filesize=30.4 K 2024-11-07T17:15:55,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/9635665c0e8441a59305d7e7246e2e80 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/9635665c0e8441a59305d7e7246e2e80 2024-11-07T17:15:55,797 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/9635665c0e8441a59305d7e7246e2e80, entries=150, sequenceid=217, filesize=11.9 K 2024-11-07T17:15:55,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/ae131beb3d8a4ba78e4869567c3fada4 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/ae131beb3d8a4ba78e4869567c3fada4 2024-11-07T17:15:55,803 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/ae131beb3d8a4ba78e4869567c3fada4, entries=150, sequenceid=217, filesize=11.9 K 2024-11-07T17:15:55,804 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 5c144e9b474530f0a58afb1f68827ff6 in 1019ms, sequenceid=217, compaction requested=false 2024-11-07T17:15:55,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:55,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:55,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-07T17:15:55,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-07T17:15:55,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-07T17:15:55,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0940 sec 2024-11-07T17:15:55,809 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 2.1000 sec 2024-11-07T17:15:55,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-07T17:15:55,814 INFO [Thread-695 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-07T17:15:55,816 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:55,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-07T17:15:55,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-07T17:15:55,817 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:55,818 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:55,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:55,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-07T17:15:55,969 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:55,970 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-07T17:15:55,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:15:55,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:55,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:15:55,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:55,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:15:55,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:55,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-07T17:15:55,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:55,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:55,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
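The "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed" entry and the newly dispatched FlushTableProcedure pid=57 above correspond to client-side flush requests. A minimal sketch of such a request with the Admin API follows; it is not the test's own code, only an illustration of the call that the master executes as a FlushTableProcedure with per-region FlushRegionProcedure subprocedures.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch only; not the test's code.
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Request a flush of the table's memstores; on the master this runs as a
      // FlushTableProcedure with one FlushRegionProcedure per region, as above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}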
2024-11-07T17:15:55,972 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:55,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:55,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:55,981 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107ddf7ba7ada1347f89d5af701daab965f_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999755968/Put/seqid=0 2024-11-07T17:15:55,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:55,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999815984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:55,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999815984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:55,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999815987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:55,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999815990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:55,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:55,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999815992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742025_1201 (size=17284) 2024-11-07T17:15:56,008 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,014 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107ddf7ba7ada1347f89d5af701daab965f_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ddf7ba7ada1347f89d5af701daab965f_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:56,015 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/2bc5b62180c349b99ee5ea70b63915be, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:56,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/2bc5b62180c349b99ee5ea70b63915be is 175, key is test_row_0/A:col10/1730999755968/Put/seqid=0 2024-11-07T17:15:56,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 
is added to blk_1073742026_1202 (size=48389) 2024-11-07T17:15:56,051 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=244, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/2bc5b62180c349b99ee5ea70b63915be 2024-11-07T17:15:56,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/949e367dc2c84146b1cf50fac53d5b45 is 50, key is test_row_0/B:col10/1730999755968/Put/seqid=0 2024-11-07T17:15:56,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:56,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999816093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:56,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999816094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:56,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999816095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:56,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999816095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:56,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999816096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742027_1203 (size=12151) 2024-11-07T17:15:56,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/949e367dc2c84146b1cf50fac53d5b45 2024-11-07T17:15:56,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-07T17:15:56,125 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-07T17:15:56,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:56,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/583bb4ede50147ee9c7107841c8fb93c is 50, key is test_row_0/C:col10/1730999755968/Put/seqid=0 2024-11-07T17:15:56,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:56,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:56,126 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:56,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:56,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:56,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742028_1204 (size=12151) 2024-11-07T17:15:56,279 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-07T17:15:56,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:56,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:56,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:56,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:56,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:56,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:56,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:56,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999816296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:56,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999816296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:56,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999816296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:56,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999816299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:56,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999816300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-07T17:15:56,432 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-07T17:15:56,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:56,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:56,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:56,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:56,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:56,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:56,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/583bb4ede50147ee9c7107841c8fb93c 2024-11-07T17:15:56,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/2bc5b62180c349b99ee5ea70b63915be as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/2bc5b62180c349b99ee5ea70b63915be 2024-11-07T17:15:56,585 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/2bc5b62180c349b99ee5ea70b63915be, entries=250, sequenceid=244, filesize=47.3 K 2024-11-07T17:15:56,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/949e367dc2c84146b1cf50fac53d5b45 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/949e367dc2c84146b1cf50fac53d5b45 2024-11-07T17:15:56,587 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,588 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-07T17:15:56,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:56,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:56,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:56,588 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:56,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:56,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/949e367dc2c84146b1cf50fac53d5b45, entries=150, sequenceid=244, filesize=11.9 K 2024-11-07T17:15:56,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:56,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/583bb4ede50147ee9c7107841c8fb93c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/583bb4ede50147ee9c7107841c8fb93c 2024-11-07T17:15:56,605 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/583bb4ede50147ee9c7107841c8fb93c, entries=150, sequenceid=244, filesize=11.9 K 2024-11-07T17:15:56,606 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 5c144e9b474530f0a58afb1f68827ff6 in 637ms, sequenceid=244, compaction requested=true 2024-11-07T17:15:56,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:56,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:15:56,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:56,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:56,606 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:56,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:56,606 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:56,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:56,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:56,608 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:56,608 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/B is 
initiating minor compaction (all files) 2024-11-07T17:15:56,608 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/B in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:56,608 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111043 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:56,608 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/A is initiating minor compaction (all files) 2024-11-07T17:15:56,608 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/bcb0e11ab039489db35ad8ab3f12b429, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/9635665c0e8441a59305d7e7246e2e80, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/949e367dc2c84146b1cf50fac53d5b45] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=36.0 K 2024-11-07T17:15:56,608 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/A in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:56,608 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/3993fac3a9ce4c6588724044de11f84e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/269eab1018574de690af2b3f231d6e0c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/2bc5b62180c349b99ee5ea70b63915be] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=108.4 K 2024-11-07T17:15:56,609 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:56,609 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/3993fac3a9ce4c6588724044de11f84e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/269eab1018574de690af2b3f231d6e0c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/2bc5b62180c349b99ee5ea70b63915be] 2024-11-07T17:15:56,609 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting bcb0e11ab039489db35ad8ab3f12b429, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1730999753355 2024-11-07T17:15:56,609 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3993fac3a9ce4c6588724044de11f84e, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1730999753355 2024-11-07T17:15:56,610 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 9635665c0e8441a59305d7e7246e2e80, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1730999753668 2024-11-07T17:15:56,610 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 269eab1018574de690af2b3f231d6e0c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1730999753668 2024-11-07T17:15:56,610 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 949e367dc2c84146b1cf50fac53d5b45, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1730999754848 2024-11-07T17:15:56,610 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2bc5b62180c349b99ee5ea70b63915be, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1730999754845 2024-11-07T17:15:56,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,645 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:56,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,650 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,653 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411079f9c654f47994dfe90d329f967cbf7cf_5c144e9b474530f0a58afb1f68827ff6 store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:56,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,664 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411079f9c654f47994dfe90d329f967cbf7cf_5c144e9b474530f0a58afb1f68827ff6, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:56,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,665 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411079f9c654f47994dfe90d329f967cbf7cf_5c144e9b474530f0a58afb1f68827ff6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:56,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,682 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#B#compaction#172 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:56,683 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/726e13cbbc604081b9c7e8a9619f28d4 is 50, key is test_row_0/B:col10/1730999755968/Put/seqid=0 2024-11-07T17:15:56,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742029_1205 (size=4469) 2024-11-07T17:15:56,744 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-07T17:15:56,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:56,745 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T17:15:56,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742030_1206 (size=12697) 2024-11-07T17:15:56,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:15:56,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:56,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:15:56,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:56,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:15:56,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:56,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
as already flushing 2024-11-07T17:15:56,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:56,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411079f592a41421f48d890a5e6aebfd36661_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999756745/Put/seqid=0 2024-11-07T17:15:56,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742031_1207 (size=12304) 2024-11-07T17:15:56,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:56,788 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411079f592a41421f48d890a5e6aebfd36661_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411079f592a41421f48d890a5e6aebfd36661_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:56,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/c24b37f7d011471a877d99c840792f40, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:56,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/c24b37f7d011471a877d99c840792f40 is 175, key is test_row_0/A:col10/1730999756745/Put/seqid=0 2024-11-07T17:15:56,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742032_1208 (size=31105) 2024-11-07T17:15:56,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-07T17:15:56,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:56,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999816925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:56,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999816926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:56,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999816926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:56,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999816928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:56,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:56,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999816930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:57,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999817040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:57,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999817041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:57,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999817041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:57,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999817043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:57,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999817049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,127 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#A#compaction#171 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:57,128 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/38fe0dc3b17247c2942933e14677ea37 is 175, key is test_row_0/A:col10/1730999755968/Put/seqid=0 2024-11-07T17:15:57,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742033_1209 (size=31651) 2024-11-07T17:15:57,161 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/38fe0dc3b17247c2942933e14677ea37 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/38fe0dc3b17247c2942933e14677ea37 2024-11-07T17:15:57,162 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/726e13cbbc604081b9c7e8a9619f28d4 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/726e13cbbc604081b9c7e8a9619f28d4 2024-11-07T17:15:57,169 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/A of 5c144e9b474530f0a58afb1f68827ff6 into 38fe0dc3b17247c2942933e14677ea37(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:57,169 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:57,169 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/A, priority=13, startTime=1730999756606; duration=0sec 2024-11-07T17:15:57,169 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:57,169 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:A 2024-11-07T17:15:57,170 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:57,172 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:57,173 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/C is initiating minor compaction (all files) 2024-11-07T17:15:57,173 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/C in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:57,173 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/5e3014ad02e64b54b8485f97b575a178, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/ae131beb3d8a4ba78e4869567c3fada4, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/583bb4ede50147ee9c7107841c8fb93c] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=36.0 K 2024-11-07T17:15:57,176 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e3014ad02e64b54b8485f97b575a178, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1730999753355 2024-11-07T17:15:57,176 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae131beb3d8a4ba78e4869567c3fada4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1730999753668 2024-11-07T17:15:57,177 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 583bb4ede50147ee9c7107841c8fb93c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1730999754848 2024-11-07T17:15:57,180 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/B of 5c144e9b474530f0a58afb1f68827ff6 into 726e13cbbc604081b9c7e8a9619f28d4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:57,180 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:57,180 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/B, priority=13, startTime=1730999756606; duration=0sec 2024-11-07T17:15:57,180 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:57,180 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:B 2024-11-07T17:15:57,194 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#C#compaction#174 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:57,195 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/4afb2a657be74746b1109a355a8e88e0 is 50, key is test_row_0/C:col10/1730999755968/Put/seqid=0 2024-11-07T17:15:57,215 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=256, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/c24b37f7d011471a877d99c840792f40 2024-11-07T17:15:57,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/8c803a4a148043cd91c2da3c7f53335a is 50, key is test_row_0/B:col10/1730999756745/Put/seqid=0 2024-11-07T17:15:57,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742034_1210 (size=12697) 2024-11-07T17:15:57,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:57,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999817243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:57,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999817245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:57,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999817246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:57,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999817253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:57,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999817254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742035_1211 (size=12151) 2024-11-07T17:15:57,271 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/8c803a4a148043cd91c2da3c7f53335a 2024-11-07T17:15:57,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/3261530fb680442fa5db93639151c7fd is 50, key is test_row_0/C:col10/1730999756745/Put/seqid=0 2024-11-07T17:15:57,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742036_1212 (size=12151) 2024-11-07T17:15:57,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:57,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999817549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:57,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999817549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:57,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999817553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:57,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999817557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:57,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999817557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:57,650 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/4afb2a657be74746b1109a355a8e88e0 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/4afb2a657be74746b1109a355a8e88e0 2024-11-07T17:15:57,657 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/C of 5c144e9b474530f0a58afb1f68827ff6 into 4afb2a657be74746b1109a355a8e88e0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:57,657 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:57,657 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/C, priority=13, startTime=1730999756606; duration=0sec 2024-11-07T17:15:57,657 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:57,657 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:C 2024-11-07T17:15:57,728 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/3261530fb680442fa5db93639151c7fd 2024-11-07T17:15:57,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/c24b37f7d011471a877d99c840792f40 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c24b37f7d011471a877d99c840792f40 2024-11-07T17:15:57,745 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c24b37f7d011471a877d99c840792f40, entries=150, sequenceid=256, filesize=30.4 K 2024-11-07T17:15:57,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/8c803a4a148043cd91c2da3c7f53335a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/8c803a4a148043cd91c2da3c7f53335a 2024-11-07T17:15:57,752 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/8c803a4a148043cd91c2da3c7f53335a, entries=150, sequenceid=256, filesize=11.9 K 2024-11-07T17:15:57,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/3261530fb680442fa5db93639151c7fd 
as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/3261530fb680442fa5db93639151c7fd 2024-11-07T17:15:57,764 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/3261530fb680442fa5db93639151c7fd, entries=150, sequenceid=256, filesize=11.9 K 2024-11-07T17:15:57,773 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 5c144e9b474530f0a58afb1f68827ff6 in 1027ms, sequenceid=256, compaction requested=false 2024-11-07T17:15:57,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:57,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:57,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-07T17:15:57,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-07T17:15:57,776 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-07T17:15:57,776 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9560 sec 2024-11-07T17:15:57,778 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.9610 sec 2024-11-07T17:15:57,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-07T17:15:57,923 INFO [Thread-695 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-07T17:15:57,924 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:15:57,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-07T17:15:57,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-07T17:15:57,925 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:15:57,926 INFO [PEWorker-1 {}] 
procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:15:57,926 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:15:58,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-07T17:15:58,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:58,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-07T17:15:58,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:15:58,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:58,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:15:58,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:58,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:15:58,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:58,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:58,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999818061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:58,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999818062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,064 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107d5f3e3c91bcd47218a14971ca9ee5d43_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999758053/Put/seqid=0 2024-11-07T17:15:58,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:58,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999818063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:58,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999818065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:58,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999818063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742037_1213 (size=12454) 2024-11-07T17:15:58,070 INFO [master/3a0fde618c86:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-07T17:15:58,070 INFO [master/3a0fde618c86:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
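The repeated WARN/DEBUG pairs above show the region server's RPC handlers rejecting Mutate calls with RegionTooBusyException because the region's memstore is over its 512.0 K blocking limit while the requested flush is still draining it. As a hedged illustration only (not code from this test: the stock HBase client already retries these rejections internally, the table, family and row names are copied from the log, and the backoff values are invented), a writer that catches the exception itself and backs off could look roughly like this:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long sleepMs = 100;                 // illustrative starting backoff, not from the test
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                 // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          // The server is above its memstore blocking threshold (derived from
          // hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier);
          // wait for the in-flight flush to free memstore space, then retry.
          Thread.sleep(sleepMs);
          sleepMs *= 2;
        }
      }
    }
  }
}
```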
2024-11-07T17:15:58,078 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-07T17:15:58,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:58,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:58,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:58,078 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:58,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:58,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:58,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:58,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999818165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:58,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999818167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:58,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999818168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-07T17:15:58,230 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-07T17:15:58,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:58,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:58,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:58,231 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:58,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:58,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:58,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:58,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999818369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:58,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:58,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999818370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999818369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,383 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-07T17:15:58,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:58,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:58,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:58,384 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:58,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:58,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
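The cycle above repeats because the master keeps re-dispatching FlushRegionCallable for pid=60 while the region server reports "NOT flushing ... as already flushing" and fails the callable with an IOException; the subprocedure only succeeds once the in-flight memstore flush finishes. The flush itself was requested through the Admin API (the "Client=jenkins ... flush TestAcidGuarantees" entry earlier). A minimal sketch of that request, assuming a reachable cluster and using only the public client API rather than the test's own harness:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table and waits for the
      // resulting flush procedure to complete, even if the region server has to
      // report "already flushing" several times before the flush actually runs.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

This matches the client-side behaviour visible in the log, where the earlier flush (procId 57) was only reported "completed" to the waiting client thread after its FlushRegionProcedure finished.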
2024-11-07T17:15:58,470 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:58,475 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107d5f3e3c91bcd47218a14971ca9ee5d43_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107d5f3e3c91bcd47218a14971ca9ee5d43_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:58,478 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/ab1397683d1744f0a229daf569823722, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:58,478 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/ab1397683d1744f0a229daf569823722 is 175, key is test_row_0/A:col10/1730999758053/Put/seqid=0 2024-11-07T17:15:58,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742038_1214 (size=31255) 2024-11-07T17:15:58,500 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=285, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/ab1397683d1744f0a229daf569823722 2024-11-07T17:15:58,514 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/2ba2d57d6ceb41de99388d29281a0b18 is 50, key is test_row_0/B:col10/1730999758053/Put/seqid=0 2024-11-07T17:15:58,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742039_1215 (size=12301) 2024-11-07T17:15:58,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-07T17:15:58,537 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-07T17:15:58,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:58,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:58,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:58,538 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:58,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:58,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:58,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:58,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999818676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:58,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999818676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:58,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999818676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,691 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,691 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-07T17:15:58,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:58,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:58,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:58,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:58,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:58,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:58,845 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:58,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-07T17:15:58,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:58,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:58,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:58,847 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:58,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:58,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
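The repeated pid=60 failures above occur because the master keeps re-dispatching the FlushRegionCallable while the MemStoreFlusher already has a flush of region 5c144e9b474530f0a58afb1f68827ff6 in progress; each attempt is rejected with "NOT flushing ... as already flushing" and reported back as an IOException until the in-flight flush completes. For orientation only, a minimal client-side sketch of requesting the same table flush through the public Admin API (HBase 2.x assumed; this is not the master procedure path shown in the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; when a region is
      // already flushing, the server-side procedure simply retries, which is the
      // pattern visible in the repeated pid=60 attempts above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}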
2024-11-07T17:15:58,928 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/2ba2d57d6ceb41de99388d29281a0b18 2024-11-07T17:15:58,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/78c2c00160f0488e88bd8a95f247a688 is 50, key is test_row_0/C:col10/1730999758053/Put/seqid=0 2024-11-07T17:15:58,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742040_1216 (size=12301) 2024-11-07T17:15:59,001 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:59,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-07T17:15:59,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:59,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:59,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:59,002 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:59,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:59,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:59,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-07T17:15:59,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:59,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999819065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:59,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:59,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999819071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:59,155 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:59,155 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-07T17:15:59,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:59,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:59,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:59,155 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
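The RegionTooBusyException entries ("Over memstore limit=512.0 K") mean the region's memstore has grown past its blocking limit, so incoming Mutate calls are rejected until the flush drains it. In a stock configuration that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the 512 K figure here suggests the test runs with a much smaller flush size than the 128 MB default. A small sketch of the arithmetic (the default values shown are assumptions about this cluster, not read from its configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Out of the box the flush size is 128 MB and the multiplier is 4; a blocking
    // limit of 512 K would follow from e.g. a 128 K flush size with the default
    // multiplier, which is presumably what this test configures.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    System.out.println("writes block once a region's memstore exceeds "
        + (flushSize * multiplier) + " bytes");
  }
}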
2024-11-07T17:15:59,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:59,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:59,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:59,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999819180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:59,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:59,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999819181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:59,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:15:59,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999819183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:15:59,308 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:59,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-07T17:15:59,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:59,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:15:59,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:59,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:15:59,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:59,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:15:59,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/78c2c00160f0488e88bd8a95f247a688 2024-11-07T17:15:59,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/ab1397683d1744f0a229daf569823722 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/ab1397683d1744f0a229daf569823722 2024-11-07T17:15:59,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/ab1397683d1744f0a229daf569823722, entries=150, sequenceid=285, filesize=30.5 K 2024-11-07T17:15:59,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/2ba2d57d6ceb41de99388d29281a0b18 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/2ba2d57d6ceb41de99388d29281a0b18 2024-11-07T17:15:59,370 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/2ba2d57d6ceb41de99388d29281a0b18, entries=150, sequenceid=285, filesize=12.0 K 2024-11-07T17:15:59,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/78c2c00160f0488e88bd8a95f247a688 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/78c2c00160f0488e88bd8a95f247a688 2024-11-07T17:15:59,375 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/78c2c00160f0488e88bd8a95f247a688, entries=150, sequenceid=285, filesize=12.0 K 2024-11-07T17:15:59,377 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 5c144e9b474530f0a58afb1f68827ff6 in 1323ms, sequenceid=285, compaction requested=true 2024-11-07T17:15:59,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:59,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:15:59,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:59,377 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:59,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:15:59,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:59,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:15:59,377 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:59,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:59,378 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94011 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:59,378 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/A is initiating minor compaction (all files) 2024-11-07T17:15:59,378 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/A in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:59,379 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/38fe0dc3b17247c2942933e14677ea37, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c24b37f7d011471a877d99c840792f40, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/ab1397683d1744f0a229daf569823722] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=91.8 K 2024-11-07T17:15:59,379 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:59,379 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/38fe0dc3b17247c2942933e14677ea37, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c24b37f7d011471a877d99c840792f40, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/ab1397683d1744f0a229daf569823722] 2024-11-07T17:15:59,379 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:59,379 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/B is initiating minor compaction (all files) 2024-11-07T17:15:59,379 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/B in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:15:59,379 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/726e13cbbc604081b9c7e8a9619f28d4, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/8c803a4a148043cd91c2da3c7f53335a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/2ba2d57d6ceb41de99388d29281a0b18] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=36.3 K 2024-11-07T17:15:59,379 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38fe0dc3b17247c2942933e14677ea37, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1730999754848 2024-11-07T17:15:59,380 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 726e13cbbc604081b9c7e8a9619f28d4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1730999754848 2024-11-07T17:15:59,380 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting c24b37f7d011471a877d99c840792f40, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1730999755986 2024-11-07T17:15:59,380 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c803a4a148043cd91c2da3c7f53335a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1730999755986 2024-11-07T17:15:59,381 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab1397683d1744f0a229daf569823722, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1730999756916 2024-11-07T17:15:59,382 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 
2ba2d57d6ceb41de99388d29281a0b18, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1730999756916 2024-11-07T17:15:59,399 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#B#compaction#180 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:59,399 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/36733d0c449a4ca6a92c33f81677336f is 50, key is test_row_0/B:col10/1730999758053/Put/seqid=0 2024-11-07T17:15:59,401 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:59,417 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107650cbd965f6f47778f8e5c0480b75a17_5c144e9b474530f0a58afb1f68827ff6 store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:59,419 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107650cbd965f6f47778f8e5c0480b75a17_5c144e9b474530f0a58afb1f68827ff6, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:59,420 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107650cbd965f6f47778f8e5c0480b75a17_5c144e9b474530f0a58afb1f68827ff6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:59,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742041_1217 (size=12949) 2024-11-07T17:15:59,432 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/36733d0c449a4ca6a92c33f81677336f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/36733d0c449a4ca6a92c33f81677336f 2024-11-07T17:15:59,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742042_1218 (size=4469) 2024-11-07T17:15:59,439 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#A#compaction#181 average throughput is 0.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:59,439 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/B of 5c144e9b474530f0a58afb1f68827ff6 into 36733d0c449a4ca6a92c33f81677336f(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:15:59,439 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:59,439 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/B, priority=13, startTime=1730999759377; duration=0sec 2024-11-07T17:15:59,440 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:15:59,440 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:B 2024-11-07T17:15:59,440 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:15:59,440 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/1c7331470515444e944fc35febeeed46 is 175, key is test_row_0/A:col10/1730999758053/Put/seqid=0 2024-11-07T17:15:59,442 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:15:59,442 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/C is initiating minor compaction (all files) 2024-11-07T17:15:59,442 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/C in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:59,442 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/4afb2a657be74746b1109a355a8e88e0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/3261530fb680442fa5db93639151c7fd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/78c2c00160f0488e88bd8a95f247a688] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=36.3 K 2024-11-07T17:15:59,443 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 4afb2a657be74746b1109a355a8e88e0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1730999754848 2024-11-07T17:15:59,443 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 3261530fb680442fa5db93639151c7fd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1730999755986 2024-11-07T17:15:59,443 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 78c2c00160f0488e88bd8a95f247a688, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1730999756916 2024-11-07T17:15:59,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742043_1219 (size=31903) 2024-11-07T17:15:59,452 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/1c7331470515444e944fc35febeeed46 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1c7331470515444e944fc35febeeed46 2024-11-07T17:15:59,458 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/A of 5c144e9b474530f0a58afb1f68827ff6 into 1c7331470515444e944fc35febeeed46(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
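The "Exploring compaction algorithm has selected 3 files" entries follow the usual ratio-based minor-compaction rule: a candidate set is acceptable when no single file is larger than the compaction ratio times the combined size of the other files in the set. The sketch below restates that check for the three A-family files selected above; the 1.2 ratio is an assumption taken from the default of hbase.hstore.compaction.ratio, not a value read from this cluster:

import java.util.List;

public final class CompactionRatioCheck {
  // True when every file is at most `ratio` times the total size of its peers,
  // which is the shape of the in-ratio test used by the exploring policy.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Illustrative sizes chosen to add up to the 94011 bytes reported for the
    // A-family selection (roughly 30.9 K, 30.4 K and 30.5 K).
    List<Long> aFiles = List.of(31_641L, 31_130L, 31_240L);
    System.out.println(filesInRatio(aFiles, 1.2)); // true -> eligible for a minor compaction
  }
}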
2024-11-07T17:15:59,458 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:59,458 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/A, priority=13, startTime=1730999759377; duration=0sec 2024-11-07T17:15:59,458 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:59,458 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:A 2024-11-07T17:15:59,461 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#C#compaction#182 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:15:59,462 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/2fd1275978844d7fbfeba67a99855afa is 50, key is test_row_0/C:col10/1730999758053/Put/seqid=0 2024-11-07T17:15:59,462 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:15:59,462 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-07T17:15:59,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:15:59,463 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-07T17:15:59,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:15:59,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:59,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:15:59,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:59,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:15:59,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:15:59,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742044_1220 (size=12949) 2024-11-07T17:15:59,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110748b3f01cc7f2411d8b693ff34aedd57b_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999758061/Put/seqid=0 2024-11-07T17:15:59,479 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/2fd1275978844d7fbfeba67a99855afa as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/2fd1275978844d7fbfeba67a99855afa 2024-11-07T17:15:59,484 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/C of 5c144e9b474530f0a58afb1f68827ff6 into 2fd1275978844d7fbfeba67a99855afa(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:15:59,484 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:15:59,484 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/C, priority=13, startTime=1730999759377; duration=0sec 2024-11-07T17:15:59,484 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:15:59,484 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:C 2024-11-07T17:15:59,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742045_1221 (size=12454) 2024-11-07T17:15:59,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:15:59,490 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110748b3f01cc7f2411d8b693ff34aedd57b_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110748b3f01cc7f2411d8b693ff34aedd57b_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:15:59,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/c1db2e4396f14849979d6eb9ccc34122, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:15:59,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/c1db2e4396f14849979d6eb9ccc34122 is 175, key is test_row_0/A:col10/1730999758061/Put/seqid=0 2024-11-07T17:15:59,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742046_1222 (size=31255) 2024-11-07T17:15:59,507 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=297, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/c1db2e4396f14849979d6eb9ccc34122 2024-11-07T17:15:59,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 
{event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/0b1ff6b2599242f9804daa63f3d5e7e5 is 50, key is test_row_0/B:col10/1730999758061/Put/seqid=0 2024-11-07T17:15:59,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742047_1223 (size=12301) 2024-11-07T17:15:59,924 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/0b1ff6b2599242f9804daa63f3d5e7e5 2024-11-07T17:15:59,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/a524e0ed248e45d5acb49968a125ccc5 is 50, key is test_row_0/C:col10/1730999758061/Put/seqid=0 2024-11-07T17:15:59,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742048_1224 (size=12301) 2024-11-07T17:15:59,988 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/a524e0ed248e45d5acb49968a125ccc5 2024-11-07T17:15:59,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/c1db2e4396f14849979d6eb9ccc34122 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c1db2e4396f14849979d6eb9ccc34122 2024-11-07T17:16:00,001 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c1db2e4396f14849979d6eb9ccc34122, entries=150, sequenceid=297, filesize=30.5 K 2024-11-07T17:16:00,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/0b1ff6b2599242f9804daa63f3d5e7e5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/0b1ff6b2599242f9804daa63f3d5e7e5 2024-11-07T17:16:00,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,011 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/0b1ff6b2599242f9804daa63f3d5e7e5, entries=150, sequenceid=297, filesize=12.0 K 2024-11-07T17:16:00,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/a524e0ed248e45d5acb49968a125ccc5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a524e0ed248e45d5acb49968a125ccc5 2024-11-07T17:16:00,026 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a524e0ed248e45d5acb49968a125ccc5, entries=150, sequenceid=297, filesize=12.0 K 2024-11-07T17:16:00,028 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for 5c144e9b474530f0a58afb1f68827ff6 in 565ms, sequenceid=297, compaction requested=false 2024-11-07T17:16:00,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:00,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:00,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-07T17:16:00,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-07T17:16:00,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-07T17:16:00,031 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-07T17:16:00,031 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1030 sec 2024-11-07T17:16:00,033 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 2.1080 sec
2024-11-07T17:16:00,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:16:00,270 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-07T17:16:00,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A
2024-11-07T17:16:00,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:16:00,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-07T17:16:00,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B
2024-11-07T17:16:00,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-07T17:16:00,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C
2024-11-07T17:16:00,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-07T17:16:00,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6
2024-11-07T17:16:00,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:16:00,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:16:00,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:16:00,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:16:00,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:16:00,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:16:00,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:16:00,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107fe5c4b5d39b34a7e8760ced7982d35b1_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999760268/Put/seqid=0 2024-11-07T17:16:00,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:00,320 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:16:00,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742049_1225 (size=25158)
2024-11-07T17:16:00,363 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:16:00,370 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107fe5c4b5d39b34a7e8760ced7982d35b1_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fe5c4b5d39b34a7e8760ced7982d35b1_5c144e9b474530f0a58afb1f68827ff6
2024-11-07T17:16:00,373 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/2d9d12df20094935a58e13fd10657be6, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6]
2024-11-07T17:16:00,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/2d9d12df20094935a58e13fd10657be6 is 175, key is test_row_0/A:col10/1730999760268/Put/seqid=0
2024-11-07T17:16:00,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742050_1226 (size=74795)
2024-11-07T17:16:00,377 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=309, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/2d9d12df20094935a58e13fd10657be6
2024-11-07T17:16:00,395 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/a3f0c862eadb44c8af92a3e52774ca4d is 50, key is test_row_0/B:col10/1730999760268/Put/seqid=0
2024-11-07T17:16:00,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742051_1227 (size=12301)
2024-11-07T17:16:00,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-07T17:16:00,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999820421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734
2024-11-07T17:16:00,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-07T17:16:00,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999820423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734
2024-11-07T17:16:00,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-07T17:16:00,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999820423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734
2024-11-07T17:16:00,526 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:00,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999820524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:00,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:00,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999820527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:00,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:00,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999820528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:00,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:00,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999820728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:00,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:00,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999820731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:00,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:00,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999820731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:00,812 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/a3f0c862eadb44c8af92a3e52774ca4d 2024-11-07T17:16:00,820 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/8765882a9a6446e3901b489de3db1dea is 50, key is test_row_0/C:col10/1730999760268/Put/seqid=0 2024-11-07T17:16:00,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742052_1228 (size=12301) 2024-11-07T17:16:01,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:01,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999821031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:01,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:01,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999821035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:01,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:01,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999821036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:01,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:01,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999821085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:01,088 DEBUG [Thread-693 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4161 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., hostname=3a0fde618c86,37403,1730999712734, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T17:16:01,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:01,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999821087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:01,090 DEBUG [Thread-685 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4161 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., hostname=3a0fde618c86,37403,1730999712734, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T17:16:01,224 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=309 (bloomFilter=true), 
to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/8765882a9a6446e3901b489de3db1dea 2024-11-07T17:16:01,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/2d9d12df20094935a58e13fd10657be6 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/2d9d12df20094935a58e13fd10657be6 2024-11-07T17:16:01,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/2d9d12df20094935a58e13fd10657be6, entries=400, sequenceid=309, filesize=73.0 K 2024-11-07T17:16:01,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/a3f0c862eadb44c8af92a3e52774ca4d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/a3f0c862eadb44c8af92a3e52774ca4d 2024-11-07T17:16:01,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/a3f0c862eadb44c8af92a3e52774ca4d, entries=150, sequenceid=309, filesize=12.0 K 2024-11-07T17:16:01,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/8765882a9a6446e3901b489de3db1dea as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/8765882a9a6446e3901b489de3db1dea 2024-11-07T17:16:01,248 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/8765882a9a6446e3901b489de3db1dea, entries=150, sequenceid=309, filesize=12.0 K 2024-11-07T17:16:01,249 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 5c144e9b474530f0a58afb1f68827ff6 in 979ms, sequenceid=309, compaction requested=true 2024-11-07T17:16:01,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:01,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:16:01,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:01,250 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:01,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:01,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:01,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:01,250 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:01,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:01,253 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:01,253 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 137953 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:01,253 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/A is initiating minor compaction (all files) 2024-11-07T17:16:01,253 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/B is initiating minor compaction (all files) 2024-11-07T17:16:01,253 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/A in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:01,253 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/B in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
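[editor's note] The records above show the mechanism behind the repeated RegionTooBusyException warnings: while the MemStoreFlusher is still writing the A/B/C stores out, HRegion.checkResources() rejects incoming Mutate calls because the region's memstore is over its 512.0 K blocking limit, and the clients (RpcRetryingCallerImpl, tries=6, retries=16) back off and retry until the flush at sequenceid=309 completes. A minimal, self-contained sketch of that back-pressure pattern is below. It is illustrative only: class, field, and method names here are hypothetical stand-ins, not the HBase source.

// Sketch of a memstore back-pressure check (assumed names, not the HBase implementation).
public class MemstoreGuardSketch {

    /** Hypothetical stand-in for org.apache.hadoop.hbase.RegionTooBusyException. */
    static class RegionTooBusy extends RuntimeException {
        RegionTooBusy(String msg) { super(msg); }
    }

    private final long blockingMemstoreSize;   // e.g. 512 * 1024 bytes in this test run
    private long currentMemstoreSize;          // grows with puts, shrinks when a flush finishes

    MemstoreGuardSketch(long blockingMemstoreSize) {
        this.blockingMemstoreSize = blockingMemstoreSize;
    }

    /** Called before applying a mutation; mirrors the checkResources() step in the stack traces above. */
    void checkResources(String regionName) {
        if (currentMemstoreSize > blockingMemstoreSize) {
            // A flush is pending but not finished; reject the write so the client backs off
            // and retries, which is what RpcRetryingCallerImpl is doing in this log.
            throw new RegionTooBusy("Over memstore limit=" + blockingMemstoreSize
                    + ", regionName=" + regionName);
        }
    }

    void put(String regionName, long mutationHeapSize) {
        checkResources(regionName);
        currentMemstoreSize += mutationHeapSize;
    }

    /** A completed flush drains the memstore, after which writes are accepted again. */
    void flushCompleted(long flushedBytes) {
        currentMemstoreSize = Math.max(0, currentMemstoreSize - flushedBytes);
    }
}

Once flushCompleted() runs (the "Finished flush of dataSize ~53.67 KB" record above), checkResources() stops throwing and the retried puts go through; that is why the exceptions stop appearing after the flush commits.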
2024-11-07T17:16:01,253 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1c7331470515444e944fc35febeeed46, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c1db2e4396f14849979d6eb9ccc34122, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/2d9d12df20094935a58e13fd10657be6] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=134.7 K 2024-11-07T17:16:01,253 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/36733d0c449a4ca6a92c33f81677336f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/0b1ff6b2599242f9804daa63f3d5e7e5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/a3f0c862eadb44c8af92a3e52774ca4d] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=36.7 K 2024-11-07T17:16:01,253 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:01,253 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1c7331470515444e944fc35febeeed46, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c1db2e4396f14849979d6eb9ccc34122, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/2d9d12df20094935a58e13fd10657be6] 2024-11-07T17:16:01,254 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 36733d0c449a4ca6a92c33f81677336f, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1730999756916 2024-11-07T17:16:01,254 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c7331470515444e944fc35febeeed46, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1730999756916 2024-11-07T17:16:01,254 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b1ff6b2599242f9804daa63f3d5e7e5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1730999758057 2024-11-07T17:16:01,254 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1db2e4396f14849979d6eb9ccc34122, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1730999758057 2024-11-07T17:16:01,255 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting a3f0c862eadb44c8af92a3e52774ca4d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1730999760261 2024-11-07T17:16:01,255 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d9d12df20094935a58e13fd10657be6, keycount=400, bloomtype=ROW, size=73.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1730999760207 2024-11-07T17:16:01,275 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#B#compaction#189 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:01,275 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/19f05b4413e641bf8a5fe18897dd87d7 is 50, key is test_row_0/B:col10/1730999760268/Put/seqid=0 2024-11-07T17:16:01,284 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:01,306 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107ebe71a1f34d94946ad83c1e72976e3b0_5c144e9b474530f0a58afb1f68827ff6 store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:01,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742053_1229 (size=13051) 2024-11-07T17:16:01,309 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107ebe71a1f34d94946ad83c1e72976e3b0_5c144e9b474530f0a58afb1f68827ff6, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:01,310 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107ebe71a1f34d94946ad83c1e72976e3b0_5c144e9b474530f0a58afb1f68827ff6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:01,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742054_1230 (size=4469) 2024-11-07T17:16:01,343 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#A#compaction#190 average throughput is 0.42 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:01,344 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/fb1b353e6a4d4dad887015dd7f2f13dd is 175, key is test_row_0/A:col10/1730999760268/Put/seqid=0 2024-11-07T17:16:01,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742055_1231 (size=32005) 2024-11-07T17:16:01,365 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/fb1b353e6a4d4dad887015dd7f2f13dd as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/fb1b353e6a4d4dad887015dd7f2f13dd 2024-11-07T17:16:01,373 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/A of 5c144e9b474530f0a58afb1f68827ff6 into fb1b353e6a4d4dad887015dd7f2f13dd(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:01,373 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:01,373 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/A, priority=13, startTime=1730999761250; duration=0sec 2024-11-07T17:16:01,373 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:01,373 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:A 2024-11-07T17:16:01,373 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:01,374 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:01,374 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/C is initiating minor compaction (all files) 2024-11-07T17:16:01,374 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/C in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:16:01,375 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/2fd1275978844d7fbfeba67a99855afa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a524e0ed248e45d5acb49968a125ccc5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/8765882a9a6446e3901b489de3db1dea] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=36.7 K 2024-11-07T17:16:01,375 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2fd1275978844d7fbfeba67a99855afa, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1730999756916 2024-11-07T17:16:01,376 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting a524e0ed248e45d5acb49968a125ccc5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1730999758057 2024-11-07T17:16:01,376 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8765882a9a6446e3901b489de3db1dea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1730999760261 2024-11-07T17:16:01,394 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#C#compaction#191 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:01,395 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/a95a53ecf35c43fe8d5f0e1b944d5968 is 50, key is test_row_0/C:col10/1730999760268/Put/seqid=0 2024-11-07T17:16:01,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742056_1232 (size=13051) 2024-11-07T17:16:01,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:01,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-07T17:16:01,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:16:01,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:01,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:16:01,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:01,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:16:01,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:01,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107953f59ce936c494bbde59b44143767b1_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999761534/Put/seqid=0 2024-11-07T17:16:01,548 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:01,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999821545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:01,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:01,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999821546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:01,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:01,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742057_1233 (size=14994) 2024-11-07T17:16:01,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999821548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:01,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:01,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999821650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:01,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:01,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999821650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:01,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:01,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999821653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:01,719 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/19f05b4413e641bf8a5fe18897dd87d7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/19f05b4413e641bf8a5fe18897dd87d7 2024-11-07T17:16:01,728 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/B of 5c144e9b474530f0a58afb1f68827ff6 into 19f05b4413e641bf8a5fe18897dd87d7(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:16:01,728 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:01,728 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/B, priority=13, startTime=1730999761250; duration=0sec 2024-11-07T17:16:01,728 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:01,728 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:B 2024-11-07T17:16:01,808 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/a95a53ecf35c43fe8d5f0e1b944d5968 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a95a53ecf35c43fe8d5f0e1b944d5968 2024-11-07T17:16:01,816 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/C of 5c144e9b474530f0a58afb1f68827ff6 into a95a53ecf35c43fe8d5f0e1b944d5968(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:01,816 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:01,816 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/C, priority=13, startTime=1730999761250; duration=0sec 2024-11-07T17:16:01,816 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:01,816 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:C 2024-11-07T17:16:01,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:01,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999821857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:01,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:01,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999821857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:01,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:01,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999821858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:01,953 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:01,963 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107953f59ce936c494bbde59b44143767b1_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107953f59ce936c494bbde59b44143767b1_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:01,964 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/76963d32509e41fb9415db3848037898, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:01,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/76963d32509e41fb9415db3848037898 is 175, key is test_row_0/A:col10/1730999761534/Put/seqid=0 2024-11-07T17:16:02,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742058_1234 (size=39949) 2024-11-07T17:16:02,003 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=337, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/76963d32509e41fb9415db3848037898 2024-11-07T17:16:02,018 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/6c47475b5e9a4ef6b2bf2debc7f0c7a8 is 50, key is test_row_0/B:col10/1730999761534/Put/seqid=0 2024-11-07T17:16:02,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-07T17:16:02,031 INFO [Thread-695 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-07T17:16:02,033 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:02,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-11-07T17:16:02,035 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:02,035 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:02,036 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:02,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-07T17:16:02,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742059_1235 (size=12301) 2024-11-07T17:16:02,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-07T17:16:02,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:02,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999822159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:02,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:02,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999822160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:02,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:02,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999822164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:02,188 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:02,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-07T17:16:02,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:02,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:16:02,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:02,189 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:02,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:02,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:02,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-07T17:16:02,342 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:02,342 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-07T17:16:02,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:02,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:16:02,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:02,342 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:02,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:02,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:02,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/6c47475b5e9a4ef6b2bf2debc7f0c7a8 2024-11-07T17:16:02,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/2e313d73bdf4426fad5b59650f447a19 is 50, key is test_row_0/C:col10/1730999761534/Put/seqid=0 2024-11-07T17:16:02,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742060_1236 (size=12301) 2024-11-07T17:16:02,494 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:02,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-07T17:16:02,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:02,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:16:02,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:02,495 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:02,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:02,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:02,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-07T17:16:02,647 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:02,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-07T17:16:02,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:02,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:16:02,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:02,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:02,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:02,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:02,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:02,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999822662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:02,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:02,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999822665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:02,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:02,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999822670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:02,801 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:02,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-07T17:16:02,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:16:02,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:16:02,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:02,802 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:02,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:02,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:02,868 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/2e313d73bdf4426fad5b59650f447a19 2024-11-07T17:16:02,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/76963d32509e41fb9415db3848037898 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/76963d32509e41fb9415db3848037898 2024-11-07T17:16:02,877 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/76963d32509e41fb9415db3848037898, entries=200, sequenceid=337, filesize=39.0 K 2024-11-07T17:16:02,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/6c47475b5e9a4ef6b2bf2debc7f0c7a8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6c47475b5e9a4ef6b2bf2debc7f0c7a8 2024-11-07T17:16:02,882 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6c47475b5e9a4ef6b2bf2debc7f0c7a8, entries=150, sequenceid=337, filesize=12.0 K 2024-11-07T17:16:02,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/2e313d73bdf4426fad5b59650f447a19 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/2e313d73bdf4426fad5b59650f447a19 2024-11-07T17:16:02,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/2e313d73bdf4426fad5b59650f447a19, entries=150, sequenceid=337, filesize=12.0 K 2024-11-07T17:16:02,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 5c144e9b474530f0a58afb1f68827ff6 in 1353ms, sequenceid=337, compaction requested=false 2024-11-07T17:16:02,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:02,954 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:02,955 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=62 2024-11-07T17:16:02,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:02,955 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-07T17:16:02,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:16:02,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:02,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:16:02,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:02,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:16:02,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:02,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c1c99f6f416e44ccb5601b0d1d7e9bc1_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999761543/Put/seqid=0 2024-11-07T17:16:02,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742061_1237 (size=12454) 2024-11-07T17:16:03,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-07T17:16:03,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:03,387 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c1c99f6f416e44ccb5601b0d1d7e9bc1_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c1c99f6f416e44ccb5601b0d1d7e9bc1_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:03,388 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/39e87c720f3842d781b8d5d45be84f0b, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:03,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/39e87c720f3842d781b8d5d45be84f0b is 175, key is test_row_0/A:col10/1730999761543/Put/seqid=0 2024-11-07T17:16:03,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742062_1238 (size=31255) 2024-11-07T17:16:03,403 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=348, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/39e87c720f3842d781b8d5d45be84f0b 2024-11-07T17:16:03,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/00434188772b462680a296613d0d8fec is 50, key is test_row_0/B:col10/1730999761543/Put/seqid=0 2024-11-07T17:16:03,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742063_1239 (size=12301) 2024-11-07T17:16:03,668 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:16:03,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:03,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:03,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999823721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:03,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:03,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999823722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:03,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:03,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999823723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:03,816 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/00434188772b462680a296613d0d8fec 2024-11-07T17:16:03,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/1448a22f182040d0bec1376d5d1bc68f is 50, key is test_row_0/C:col10/1730999761543/Put/seqid=0 2024-11-07T17:16:03,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:03,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999823824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:03,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:03,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999823824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:03,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:03,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999823825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:03,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742064_1240 (size=12301) 2024-11-07T17:16:03,828 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/1448a22f182040d0bec1376d5d1bc68f 2024-11-07T17:16:03,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/39e87c720f3842d781b8d5d45be84f0b as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/39e87c720f3842d781b8d5d45be84f0b 2024-11-07T17:16:03,838 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/39e87c720f3842d781b8d5d45be84f0b, entries=150, sequenceid=348, filesize=30.5 K 2024-11-07T17:16:03,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/00434188772b462680a296613d0d8fec as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/00434188772b462680a296613d0d8fec 2024-11-07T17:16:03,843 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 
{event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/00434188772b462680a296613d0d8fec, entries=150, sequenceid=348, filesize=12.0 K 2024-11-07T17:16:03,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/1448a22f182040d0bec1376d5d1bc68f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/1448a22f182040d0bec1376d5d1bc68f 2024-11-07T17:16:03,849 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/1448a22f182040d0bec1376d5d1bc68f, entries=150, sequenceid=348, filesize=12.0 K 2024-11-07T17:16:03,850 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 5c144e9b474530f0a58afb1f68827ff6 in 894ms, sequenceid=348, compaction requested=true 2024-11-07T17:16:03,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:03,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:16:03,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-11-07T17:16:03,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-11-07T17:16:03,852 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-07T17:16:03,852 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8150 sec 2024-11-07T17:16:03,854 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 1.8200 sec 2024-11-07T17:16:04,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:04,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-07T17:16:04,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:16:04,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:04,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:16:04,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:04,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:16:04,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:04,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:04,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999824033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107631ede8b07b340da94e4324bd886f8a2_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999764029/Put/seqid=0 2024-11-07T17:16:04,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:04,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999824037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:04,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999824040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742065_1241 (size=17534) 2024-11-07T17:16:04,045 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:04,050 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107631ede8b07b340da94e4324bd886f8a2_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107631ede8b07b340da94e4324bd886f8a2_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:04,051 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/1a4f47f83608456394e5dda6fabc12dd, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:04,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/1a4f47f83608456394e5dda6fabc12dd is 175, key is test_row_0/A:col10/1730999764029/Put/seqid=0 2024-11-07T17:16:04,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 
is added to blk_1073742066_1242 (size=48639) 2024-11-07T17:16:04,056 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=377, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/1a4f47f83608456394e5dda6fabc12dd 2024-11-07T17:16:04,063 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/27a8a9229fc04ed1ae30c422e6fb9dde is 50, key is test_row_0/B:col10/1730999764029/Put/seqid=0 2024-11-07T17:16:04,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742067_1243 (size=12301) 2024-11-07T17:16:04,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:04,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999824138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-07T17:16:04,141 INFO [Thread-695 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-07T17:16:04,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:04,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999824140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,142 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:04,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:04,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999824141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-11-07T17:16:04,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-07T17:16:04,144 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:04,144 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:04,145 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:04,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-07T17:16:04,296 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,297 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-07T17:16:04,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:04,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:16:04,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:16:04,298 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:04,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:04,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:04,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:04,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999824339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,343 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:04,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999824342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:04,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999824344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-07T17:16:04,450 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,450 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-07T17:16:04,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:04,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:16:04,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:04,451 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:04,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:04,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:04,470 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/27a8a9229fc04ed1ae30c422e6fb9dde 2024-11-07T17:16:04,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/9904d232bbc844d09a08edb2a7df1108 is 50, key is test_row_0/C:col10/1730999764029/Put/seqid=0 2024-11-07T17:16:04,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742068_1244 (size=12301) 2024-11-07T17:16:04,603 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-07T17:16:04,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:04,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:16:04,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:04,604 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:04,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:04,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:04,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:04,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999824642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:04,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999824647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:04,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999824647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-07T17:16:04,756 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,757 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-07T17:16:04,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:04,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:16:04,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:04,757 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:04,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:04,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:04,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/9904d232bbc844d09a08edb2a7df1108 2024-11-07T17:16:04,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/1a4f47f83608456394e5dda6fabc12dd as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1a4f47f83608456394e5dda6fabc12dd 2024-11-07T17:16:04,896 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1a4f47f83608456394e5dda6fabc12dd, entries=250, sequenceid=377, filesize=47.5 K 2024-11-07T17:16:04,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/27a8a9229fc04ed1ae30c422e6fb9dde as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/27a8a9229fc04ed1ae30c422e6fb9dde 2024-11-07T17:16:04,902 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/27a8a9229fc04ed1ae30c422e6fb9dde, entries=150, sequenceid=377, filesize=12.0 K 2024-11-07T17:16:04,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/9904d232bbc844d09a08edb2a7df1108 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9904d232bbc844d09a08edb2a7df1108 2024-11-07T17:16:04,908 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9904d232bbc844d09a08edb2a7df1108, entries=150, sequenceid=377, filesize=12.0 K 2024-11-07T17:16:04,909 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 5c144e9b474530f0a58afb1f68827ff6 in 881ms, sequenceid=377, compaction requested=true 2024-11-07T17:16:04,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:04,909 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
3a0fde618c86,37403,1730999712734 2024-11-07T17:16:04,910 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:16:04,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:16:04,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-07T17:16:04,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:04,910 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:16:04,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:04,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:04,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:04,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:04,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:04,910 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-07T17:16:04,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:16:04,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:04,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:16:04,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:04,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:16:04,911 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:04,911 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 151848 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:16:04,911 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/A is initiating minor compaction (all files) 2024-11-07T17:16:04,911 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/A in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:04,912 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/fb1b353e6a4d4dad887015dd7f2f13dd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/76963d32509e41fb9415db3848037898, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/39e87c720f3842d781b8d5d45be84f0b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1a4f47f83608456394e5dda6fabc12dd] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=148.3 K 2024-11-07T17:16:04,912 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:04,912 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/fb1b353e6a4d4dad887015dd7f2f13dd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/76963d32509e41fb9415db3848037898, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/39e87c720f3842d781b8d5d45be84f0b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1a4f47f83608456394e5dda6fabc12dd] 2024-11-07T17:16:04,912 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb1b353e6a4d4dad887015dd7f2f13dd, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1730999760261 2024-11-07T17:16:04,913 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:16:04,913 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/B is initiating minor compaction (all files) 2024-11-07T17:16:04,913 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/B in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:04,913 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/19f05b4413e641bf8a5fe18897dd87d7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6c47475b5e9a4ef6b2bf2debc7f0c7a8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/00434188772b462680a296613d0d8fec, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/27a8a9229fc04ed1ae30c422e6fb9dde] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=48.8 K 2024-11-07T17:16:04,913 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 76963d32509e41fb9415db3848037898, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1730999760420 2024-11-07T17:16:04,914 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 19f05b4413e641bf8a5fe18897dd87d7, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1730999760261 2024-11-07T17:16:04,914 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39e87c720f3842d781b8d5d45be84f0b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1730999761543 2024-11-07T17:16:04,915 
DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c47475b5e9a4ef6b2bf2debc7f0c7a8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1730999760421 2024-11-07T17:16:04,915 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a4f47f83608456394e5dda6fabc12dd, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1730999763720 2024-11-07T17:16:04,915 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 00434188772b462680a296613d0d8fec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1730999761543 2024-11-07T17:16:04,917 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 27a8a9229fc04ed1ae30c422e6fb9dde, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1730999763721 2024-11-07T17:16:04,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107f1951afce9ab4ef9936836ba6a83bdab_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999764031/Put/seqid=0 2024-11-07T17:16:04,935 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#B#compaction#202 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:04,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742069_1245 (size=12454) 2024-11-07T17:16:04,936 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/95b4179cb9fc477cb1d2611cab022dd9 is 50, key is test_row_0/B:col10/1730999764029/Put/seqid=0 2024-11-07T17:16:04,937 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:04,941 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107b68f7179b59d43da9899c94b2656f20e_5c144e9b474530f0a58afb1f68827ff6 store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:04,943 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107b68f7179b59d43da9899c94b2656f20e_5c144e9b474530f0a58afb1f68827ff6, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:04,944 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107b68f7179b59d43da9899c94b2656f20e_5c144e9b474530f0a58afb1f68827ff6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:04,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742070_1246 (size=13187) 2024-11-07T17:16:04,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742071_1247 (size=4469) 2024-11-07T17:16:05,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. as already flushing 2024-11-07T17:16:05,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:05,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999825165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999825168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999825168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999825168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999825169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-07T17:16:05,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999825270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999825272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999825272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999825272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999825273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:05,339 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107f1951afce9ab4ef9936836ba6a83bdab_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107f1951afce9ab4ef9936836ba6a83bdab_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:05,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/4347cf9344644afaafb32b9cc10a2932, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:05,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/4347cf9344644afaafb32b9cc10a2932 is 175, key is test_row_0/A:col10/1730999764031/Put/seqid=0 2024-11-07T17:16:05,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742072_1248 (size=31255) 2024-11-07T17:16:05,358 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/95b4179cb9fc477cb1d2611cab022dd9 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/95b4179cb9fc477cb1d2611cab022dd9 2024-11-07T17:16:05,359 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#A#compaction#203 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:05,359 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/7c43e18769d94a76a9f5996dcd9155f6 is 175, key is test_row_0/A:col10/1730999764029/Put/seqid=0 2024-11-07T17:16:05,363 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/B of 5c144e9b474530f0a58afb1f68827ff6 into 95b4179cb9fc477cb1d2611cab022dd9(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:05,363 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:05,363 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/B, priority=12, startTime=1730999764910; duration=0sec 2024-11-07T17:16:05,364 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:05,364 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:B 2024-11-07T17:16:05,364 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:16:05,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742073_1249 (size=32141) 2024-11-07T17:16:05,366 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:16:05,366 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/C is initiating minor compaction (all files) 2024-11-07T17:16:05,366 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/C in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
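The exploring policy entries above show the region server selecting and running these minor compactions on its own; for comparison, a compaction of the same table can also be requested explicitly through the public Admin client API. A minimal sketch, not taken from this test: the table name is reused from the log, while the connection setup and the choice of family are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Queue a minor compaction of a single column family (request is asynchronous).
      admin.compact(table, Bytes.toBytes("C"));
      // Or ask for a major compaction that rewrites all store files in every family.
      admin.majorCompact(table);
    }
  }
}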
2024-11-07T17:16:05,366 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a95a53ecf35c43fe8d5f0e1b944d5968, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/2e313d73bdf4426fad5b59650f447a19, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/1448a22f182040d0bec1376d5d1bc68f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9904d232bbc844d09a08edb2a7df1108] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=48.8 K 2024-11-07T17:16:05,366 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting a95a53ecf35c43fe8d5f0e1b944d5968, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1730999760261 2024-11-07T17:16:05,367 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e313d73bdf4426fad5b59650f447a19, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1730999760421 2024-11-07T17:16:05,367 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 1448a22f182040d0bec1376d5d1bc68f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1730999761543 2024-11-07T17:16:05,367 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 9904d232bbc844d09a08edb2a7df1108, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1730999763721 2024-11-07T17:16:05,375 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#C#compaction#204 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:05,376 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/604fc1a191de4d93b42a68ce9558c099 is 50, key is test_row_0/C:col10/1730999764029/Put/seqid=0 2024-11-07T17:16:05,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742074_1250 (size=13187) 2024-11-07T17:16:05,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999825473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999825475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999825474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999825476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999825476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,747 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=385, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/4347cf9344644afaafb32b9cc10a2932 2024-11-07T17:16:05,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/a77beb83095b4cd6a14a05280f7a30da is 50, key is test_row_0/B:col10/1730999764031/Put/seqid=0 2024-11-07T17:16:05,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742075_1251 (size=12301) 2024-11-07T17:16:05,771 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/7c43e18769d94a76a9f5996dcd9155f6 as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/7c43e18769d94a76a9f5996dcd9155f6 2024-11-07T17:16:05,776 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/A of 5c144e9b474530f0a58afb1f68827ff6 into 7c43e18769d94a76a9f5996dcd9155f6(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:05,776 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:05,776 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/A, priority=12, startTime=1730999764909; duration=0sec 2024-11-07T17:16:05,776 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:05,776 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:A 2024-11-07T17:16:05,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999825778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999825780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999825780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999825780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:05,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999825782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:05,786 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/604fc1a191de4d93b42a68ce9558c099 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/604fc1a191de4d93b42a68ce9558c099 2024-11-07T17:16:05,791 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/C of 5c144e9b474530f0a58afb1f68827ff6 into 604fc1a191de4d93b42a68ce9558c099(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
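The repeated RegionTooBusyException warnings above indicate the region's memstore has exceeded its 512 K blocking limit while a flush is in flight, so mutations are rejected until the flush completes; clients normally absorb this by retrying with backoff. A minimal client-side sketch, not part of the test itself: the row, value, and retry settings below are illustrative assumptions, while the table, family, and column names come from the log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Let the client absorb transient RegionTooBusyException through its own retry loop.
    conf.setInt("hbase.client.retries.number", 15);   // illustrative retry count
    conf.setLong("hbase.client.pause", 100L);         // base backoff in milliseconds
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // Retried internally by the client; throws IOException once retries are exhausted.
        table.put(put);
      } catch (IOException e) {
        System.err.println("Write gave up after retries: " + e.getMessage());
      }
    }
  }
}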
2024-11-07T17:16:05,791 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:05,791 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/C, priority=12, startTime=1730999764910; duration=0sec 2024-11-07T17:16:05,791 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:05,791 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:C 2024-11-07T17:16:06,160 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/a77beb83095b4cd6a14a05280f7a30da 2024-11-07T17:16:06,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/45e136b56cd24fc0b704707a4b327e9c is 50, key is test_row_0/C:col10/1730999764031/Put/seqid=0 2024-11-07T17:16:06,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742076_1252 (size=12301) 2024-11-07T17:16:06,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-07T17:16:06,260 DEBUG [Thread-702 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c38ee58 to 127.0.0.1:64938 2024-11-07T17:16:06,260 DEBUG [Thread-702 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:06,261 DEBUG [Thread-696 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22e911df to 127.0.0.1:64938 2024-11-07T17:16:06,261 DEBUG [Thread-696 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:06,261 DEBUG [Thread-698 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b727d6e to 127.0.0.1:64938 2024-11-07T17:16:06,261 DEBUG [Thread-698 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:06,262 DEBUG [Thread-700 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c7940d9 to 127.0.0.1:64938 2024-11-07T17:16:06,262 DEBUG [Thread-700 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:06,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:06,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34306 deadline: 1730999826282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:06,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:06,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34298 deadline: 1730999826283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:06,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:06,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34286 deadline: 1730999826285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:06,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:06,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999826287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:06,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:06,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1730999826288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:06,574 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/45e136b56cd24fc0b704707a4b327e9c 2024-11-07T17:16:06,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/4347cf9344644afaafb32b9cc10a2932 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/4347cf9344644afaafb32b9cc10a2932 2024-11-07T17:16:06,583 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/4347cf9344644afaafb32b9cc10a2932, entries=150, sequenceid=385, filesize=30.5 K 2024-11-07T17:16:06,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/a77beb83095b4cd6a14a05280f7a30da as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/a77beb83095b4cd6a14a05280f7a30da 2024-11-07T17:16:06,587 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/a77beb83095b4cd6a14a05280f7a30da, entries=150, sequenceid=385, filesize=12.0 K 2024-11-07T17:16:06,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/45e136b56cd24fc0b704707a4b327e9c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/45e136b56cd24fc0b704707a4b327e9c 2024-11-07T17:16:06,591 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/45e136b56cd24fc0b704707a4b327e9c, entries=150, sequenceid=385, filesize=12.0 K 2024-11-07T17:16:06,592 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=174.43 KB/178620 for 5c144e9b474530f0a58afb1f68827ff6 in 1682ms, sequenceid=385, compaction requested=false 2024-11-07T17:16:06,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:06,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:06,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-07T17:16:06,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-11-07T17:16:06,594 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-07T17:16:06,594 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4480 sec 2024-11-07T17:16:06,595 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 2.4510 sec 2024-11-07T17:16:07,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:07,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-07T17:16:07,288 DEBUG [Thread-691 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x505d5ccd to 127.0.0.1:64938 2024-11-07T17:16:07,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:16:07,288 DEBUG [Thread-691 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:07,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:07,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:16:07,288 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:07,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:16:07,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:07,289 DEBUG [Thread-685 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79982672 to 127.0.0.1:64938 2024-11-07T17:16:07,289 DEBUG [Thread-689 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x454f1431 to 127.0.0.1:64938 2024-11-07T17:16:07,289 DEBUG [Thread-685 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:07,289 DEBUG [Thread-689 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:07,293 DEBUG [Thread-687 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b4bd1ba to 127.0.0.1:64938 2024-11-07T17:16:07,293 DEBUG [Thread-687 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:07,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:07,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34270 deadline: 1730999827293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:07,296 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107a6190a4125984f9ba32c60fdc6825fb5_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999765162/Put/seqid=0 2024-11-07T17:16:07,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742077_1253 (size=12454) 2024-11-07T17:16:07,700 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:07,704 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107a6190a4125984f9ba32c60fdc6825fb5_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107a6190a4125984f9ba32c60fdc6825fb5_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:07,705 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/bdc36fe753c140dea2b37da2fefcae48, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:07,705 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/bdc36fe753c140dea2b37da2fefcae48 is 175, key is test_row_0/A:col10/1730999765162/Put/seqid=0 2024-11-07T17:16:07,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742078_1254 (size=31255) 2024-11-07T17:16:08,110 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=418, memsize=60.4 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/bdc36fe753c140dea2b37da2fefcae48 2024-11-07T17:16:08,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/1fcfa93e1fca48fab310f387c8d6cbac is 50, key is test_row_0/B:col10/1730999765162/Put/seqid=0 2024-11-07T17:16:08,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742079_1255 (size=12301) 2024-11-07T17:16:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-07T17:16:08,249 INFO [Thread-695 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-07T17:16:08,521 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=418 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/1fcfa93e1fca48fab310f387c8d6cbac 2024-11-07T17:16:08,527 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/7cda24e1a19c4adcaa58ee68bbeee986 is 50, key is test_row_0/C:col10/1730999765162/Put/seqid=0 2024-11-07T17:16:08,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742080_1256 (size=12301) 2024-11-07T17:16:08,932 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=418 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/7cda24e1a19c4adcaa58ee68bbeee986 2024-11-07T17:16:08,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/bdc36fe753c140dea2b37da2fefcae48 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/bdc36fe753c140dea2b37da2fefcae48 2024-11-07T17:16:08,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/bdc36fe753c140dea2b37da2fefcae48, entries=150, sequenceid=418, filesize=30.5 K 2024-11-07T17:16:08,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/1fcfa93e1fca48fab310f387c8d6cbac as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/1fcfa93e1fca48fab310f387c8d6cbac 2024-11-07T17:16:08,943 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/1fcfa93e1fca48fab310f387c8d6cbac, entries=150, sequenceid=418, filesize=12.0 K 2024-11-07T17:16:08,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/7cda24e1a19c4adcaa58ee68bbeee986 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/7cda24e1a19c4adcaa58ee68bbeee986 2024-11-07T17:16:08,947 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/7cda24e1a19c4adcaa58ee68bbeee986, entries=150, sequenceid=418, filesize=12.0 K 2024-11-07T17:16:08,948 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 5c144e9b474530f0a58afb1f68827ff6 in 1660ms, sequenceid=418, compaction requested=true 2024-11-07T17:16:08,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:08,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:16:08,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:08,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:08,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:08,948 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:08,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c144e9b474530f0a58afb1f68827ff6:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:08,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:08,948 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:08,949 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 94651 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:08,949 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/A is initiating minor compaction (all files) 2024-11-07T17:16:08,949 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:08,949 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/A in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:08,949 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/B is initiating minor compaction (all files) 2024-11-07T17:16:08,949 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/7c43e18769d94a76a9f5996dcd9155f6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/4347cf9344644afaafb32b9cc10a2932, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/bdc36fe753c140dea2b37da2fefcae48] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=92.4 K 2024-11-07T17:16:08,949 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:08,949 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/B in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:08,949 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/7c43e18769d94a76a9f5996dcd9155f6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/4347cf9344644afaafb32b9cc10a2932, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/bdc36fe753c140dea2b37da2fefcae48] 2024-11-07T17:16:08,949 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/95b4179cb9fc477cb1d2611cab022dd9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/a77beb83095b4cd6a14a05280f7a30da, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/1fcfa93e1fca48fab310f387c8d6cbac] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=36.9 K 2024-11-07T17:16:08,949 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c43e18769d94a76a9f5996dcd9155f6, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1730999763721 2024-11-07T17:16:08,949 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 95b4179cb9fc477cb1d2611cab022dd9, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1730999763721 2024-11-07T17:16:08,950 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4347cf9344644afaafb32b9cc10a2932, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1730999764031 2024-11-07T17:16:08,950 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting a77beb83095b4cd6a14a05280f7a30da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1730999764031 2024-11-07T17:16:08,950 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting bdc36fe753c140dea2b37da2fefcae48, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1730999765162 2024-11-07T17:16:08,950 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 1fcfa93e1fca48fab310f387c8d6cbac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1730999765162 2024-11-07T17:16:08,958 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:08,958 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#B#compaction#210 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:08,958 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/e8d421d77b854a158869dd4eb065a42b is 50, key is test_row_0/B:col10/1730999765162/Put/seqid=0 2024-11-07T17:16:08,960 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107cc374b4a5ee040c382707ce83334cc6c_5c144e9b474530f0a58afb1f68827ff6 store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:08,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742081_1257 (size=13289) 2024-11-07T17:16:08,978 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107cc374b4a5ee040c382707ce83334cc6c_5c144e9b474530f0a58afb1f68827ff6, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:08,978 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107cc374b4a5ee040c382707ce83334cc6c_5c144e9b474530f0a58afb1f68827ff6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:08,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742082_1258 (size=4469) 2024-11-07T17:16:08,984 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#A#compaction#211 average throughput is 0.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:08,984 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/246e321adf044cb686e9020ac92ebfb2 is 175, key is test_row_0/A:col10/1730999765162/Put/seqid=0 2024-11-07T17:16:08,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742083_1259 (size=32243) 2024-11-07T17:16:08,993 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/246e321adf044cb686e9020ac92ebfb2 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/246e321adf044cb686e9020ac92ebfb2 2024-11-07T17:16:08,998 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/A of 5c144e9b474530f0a58afb1f68827ff6 into 246e321adf044cb686e9020ac92ebfb2(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:08,998 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:08,998 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/A, priority=13, startTime=1730999768948; duration=0sec 2024-11-07T17:16:08,998 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:08,998 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:A 2024-11-07T17:16:08,998 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:08,999 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:09,000 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 5c144e9b474530f0a58afb1f68827ff6/C is initiating minor compaction (all files) 2024-11-07T17:16:09,000 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5c144e9b474530f0a58afb1f68827ff6/C in TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
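[editor's note] The "Exploring compaction algorithm has selected 3 files of size 37789 ..." entries around here show the region server choosing all three eligible store files for a minor compaction once their sizes pass the ratio check. The snippet below is a simplified, hypothetical sketch of that style of selection; it is not HBase's actual ExploringCompactionPolicy, and the class name, thresholds, and tie-breaking rule are illustrative assumptions only.

import java.util.ArrayList;
import java.util.List;

/**
 * Simplified illustration of ratio-based compaction file selection, loosely
 * modeled on the behaviour visible in the log above. NOT HBase's real
 * ExploringCompactionPolicy; all names and limits here are hypothetical.
 */
public class CompactionSelectionSketch {

    /** Returns the sizes (bytes) of the files chosen for compaction, or an empty list. */
    static List<Long> select(List<Long> fileSizes, int minFiles, int maxFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestTotal = Long.MAX_VALUE;
        // Enumerate every contiguous window of candidate files (the "permutations"
        // the log entries refer to), oldest file first.
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + minFiles; end <= Math.min(fileSizes.size(), start + maxFiles); end++) {
                List<Long> window = fileSizes.subList(start, end);
                long total = window.stream().mapToLong(Long::longValue).sum();
                if (!withinRatio(window, total, ratio)) {
                    continue;
                }
                // Prefer windows with more files; break ties by smaller total rewrite cost.
                if (window.size() > best.size() || (window.size() == best.size() && total < bestTotal)) {
                    best = new ArrayList<>(window);
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    /** Every file must be no larger than ratio * (sum of the other files in the window). */
    static boolean withinRatio(List<Long> window, long total, double ratio) {
        for (long size : window) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes summing to the 37789 bytes reported for the B-family selection above.
        List<Long> sizes = List.of(13_187L, 12_301L, 12_301L);
        System.out.println("Selected: " + select(sizes, 3, 10, 1.2));
    }
}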
2024-11-07T17:16:09,000 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/604fc1a191de4d93b42a68ce9558c099, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/45e136b56cd24fc0b704707a4b327e9c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/7cda24e1a19c4adcaa58ee68bbeee986] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp, totalSize=36.9 K 2024-11-07T17:16:09,000 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 604fc1a191de4d93b42a68ce9558c099, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1730999763721 2024-11-07T17:16:09,000 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45e136b56cd24fc0b704707a4b327e9c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1730999764031 2024-11-07T17:16:09,001 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7cda24e1a19c4adcaa58ee68bbeee986, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1730999765162 2024-11-07T17:16:09,010 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c144e9b474530f0a58afb1f68827ff6#C#compaction#212 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:09,010 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/dd4150c826ca462180d375ad3f1cb8e2 is 50, key is test_row_0/C:col10/1730999765162/Put/seqid=0 2024-11-07T17:16:09,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742084_1260 (size=13289) 2024-11-07T17:16:09,302 DEBUG [Thread-693 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x367f47f7 to 127.0.0.1:64938 2024-11-07T17:16:09,302 DEBUG [Thread-693 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:09,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-07T17:16:09,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-11-07T17:16:09,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 74 2024-11-07T17:16:09,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75 2024-11-07T17:16:09,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 78 2024-11-07T17:16:09,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 47 2024-11-07T17:16:09,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-07T17:16:09,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5839 2024-11-07T17:16:09,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5669 2024-11-07T17:16:09,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-07T17:16:09,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2459 2024-11-07T17:16:09,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7377 rows 2024-11-07T17:16:09,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2487 2024-11-07T17:16:09,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7461 rows 2024-11-07T17:16:09,303 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-07T17:16:09,303 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x118b007e to 127.0.0.1:64938 2024-11-07T17:16:09,303 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:09,305 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-07T17:16:09,306 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-07T17:16:09,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:09,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-07T17:16:09,310 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999769310"}]},"ts":"1730999769310"} 2024-11-07T17:16:09,312 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-07T17:16:09,314 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-07T17:16:09,314 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T17:16:09,316 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5c144e9b474530f0a58afb1f68827ff6, UNASSIGN}] 2024-11-07T17:16:09,316 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=5c144e9b474530f0a58afb1f68827ff6, UNASSIGN 2024-11-07T17:16:09,317 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=5c144e9b474530f0a58afb1f68827ff6, regionState=CLOSING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:09,318 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T17:16:09,318 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; CloseRegionProcedure 5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:16:09,367 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/e8d421d77b854a158869dd4eb065a42b as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/e8d421d77b854a158869dd4eb065a42b 2024-11-07T17:16:09,371 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/B of 5c144e9b474530f0a58afb1f68827ff6 into e8d421d77b854a158869dd4eb065a42b(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:09,372 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:09,372 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/B, priority=13, startTime=1730999768948; duration=0sec 2024-11-07T17:16:09,372 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:09,372 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:B 2024-11-07T17:16:09,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-07T17:16:09,419 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/dd4150c826ca462180d375ad3f1cb8e2 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/dd4150c826ca462180d375ad3f1cb8e2 2024-11-07T17:16:09,423 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5c144e9b474530f0a58afb1f68827ff6/C of 5c144e9b474530f0a58afb1f68827ff6 into dd4150c826ca462180d375ad3f1cb8e2(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
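[editor's note] Several entries earlier in this section reject Mutate calls with RegionTooBusyException ("Over memstore limit=512.0 K ...") while flushes catch up. Purely as an illustration of how a caller could back off and retry such a put, here is a minimal sketch; the stock HBase client already retries this exception internally, and the table name, column family, and backoff values used below are hypothetical.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Illustrative only: a manual retry loop around Table.put() for the
 * RegionTooBusyException seen in this log. Not taken from the test itself.
 */
public class BusyRegionRetrySketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            putWithBackoff(table, put, 5, 200L);
        }
    }

    /** Retries the put with exponential backoff while the region reports it is too busy. */
    static void putWithBackoff(Table table, Put put, int maxAttempts, long initialDelayMs)
            throws IOException, InterruptedException {
        long delay = initialDelayMs;
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                // The region server rejected the write because its memstore is over the
                // configured limit (e.g. "Over memstore limit=512.0 K" above); back off and retry.
                if (attempt >= maxAttempts) {
                    throw e;
                }
                Thread.sleep(delay);
                delay *= 2;
            }
        }
    }
}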
2024-11-07T17:16:09,423 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:09,423 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6., storeName=5c144e9b474530f0a58afb1f68827ff6/C, priority=13, startTime=1730999768948; duration=0sec 2024-11-07T17:16:09,423 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:09,423 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c144e9b474530f0a58afb1f68827ff6:C 2024-11-07T17:16:09,469 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:09,470 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] handler.UnassignRegionHandler(124): Close 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:09,470 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T17:16:09,470 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1681): Closing 5c144e9b474530f0a58afb1f68827ff6, disabling compactions & flushes 2024-11-07T17:16:09,470 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:09,470 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 2024-11-07T17:16:09,470 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. after waiting 0 ms 2024-11-07T17:16:09,470 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
2024-11-07T17:16:09,470 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(2837): Flushing 5c144e9b474530f0a58afb1f68827ff6 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-07T17:16:09,470 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=A 2024-11-07T17:16:09,470 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:09,470 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=B 2024-11-07T17:16:09,470 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:09,470 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5c144e9b474530f0a58afb1f68827ff6, store=C 2024-11-07T17:16:09,470 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:09,476 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411070c8c4a891c064d5c90de8e663fbc9032_5c144e9b474530f0a58afb1f68827ff6 is 50, key is test_row_0/A:col10/1730999769301/Put/seqid=0 2024-11-07T17:16:09,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742085_1261 (size=9914) 2024-11-07T17:16:09,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-07T17:16:09,881 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:09,884 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411070c8c4a891c064d5c90de8e663fbc9032_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411070c8c4a891c064d5c90de8e663fbc9032_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:09,885 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/c18853e67dce46cbb9cbdddbc9ec8da7, store: [table=TestAcidGuarantees family=A region=5c144e9b474530f0a58afb1f68827ff6] 2024-11-07T17:16:09,886 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/c18853e67dce46cbb9cbdddbc9ec8da7 is 175, key is test_row_0/A:col10/1730999769301/Put/seqid=0 2024-11-07T17:16:09,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742086_1262 (size=22561) 2024-11-07T17:16:09,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-07T17:16:10,290 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=428, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/c18853e67dce46cbb9cbdddbc9ec8da7 2024-11-07T17:16:10,297 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/0089f4db72914798bab376acc0cde7b1 is 50, key is test_row_0/B:col10/1730999769301/Put/seqid=0 2024-11-07T17:16:10,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742087_1263 (size=9857) 2024-11-07T17:16:10,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-07T17:16:10,701 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=428 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/0089f4db72914798bab376acc0cde7b1 2024-11-07T17:16:10,708 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/06ca54523bfe4f8dae83e234ee028f75 is 50, key is test_row_0/C:col10/1730999769301/Put/seqid=0 2024-11-07T17:16:10,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742088_1264 (size=9857) 2024-11-07T17:16:11,113 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=428 (bloomFilter=true), 
to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/06ca54523bfe4f8dae83e234ee028f75 2024-11-07T17:16:11,117 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/A/c18853e67dce46cbb9cbdddbc9ec8da7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c18853e67dce46cbb9cbdddbc9ec8da7 2024-11-07T17:16:11,120 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c18853e67dce46cbb9cbdddbc9ec8da7, entries=100, sequenceid=428, filesize=22.0 K 2024-11-07T17:16:11,121 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/B/0089f4db72914798bab376acc0cde7b1 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/0089f4db72914798bab376acc0cde7b1 2024-11-07T17:16:11,124 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/0089f4db72914798bab376acc0cde7b1, entries=100, sequenceid=428, filesize=9.6 K 2024-11-07T17:16:11,125 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/.tmp/C/06ca54523bfe4f8dae83e234ee028f75 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/06ca54523bfe4f8dae83e234ee028f75 2024-11-07T17:16:11,128 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/06ca54523bfe4f8dae83e234ee028f75, entries=100, sequenceid=428, filesize=9.6 K 2024-11-07T17:16:11,129 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 5c144e9b474530f0a58afb1f68827ff6 in 1659ms, sequenceid=428, compaction requested=false 2024-11-07T17:16:11,129 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/55c0cb1171174d248753dd1aa0658831, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/326686d5ea164bc79f9fe4fef6b96e24, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1e81fb63ede24570bee2e66b66e0036d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/a37f6175294345a1bafd58fcaa91f303, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/6a76286bcf1840eeb77d0d7d2571bfcf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/3b7263a622264f539d49a428b69042fa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/66234dbeb3f54674ae050a531e74b416, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/71fc3a4d295b4db785152eb71dcb91ad, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/41749e908c3f40c0a1b939f382998b90, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/0b55e51d194e4e379db530c94be43be4, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/beddcab2bb684931ab571793a4e22189, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/de2116fd2f6b447a8b88794bdc5f5d2d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/e857e577f58041929e698f5118e50a6a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/3993fac3a9ce4c6588724044de11f84e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/269eab1018574de690af2b3f231d6e0c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/2bc5b62180c349b99ee5ea70b63915be, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/38fe0dc3b17247c2942933e14677ea37, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c24b37f7d011471a877d99c840792f40, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1c7331470515444e944fc35febeeed46, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/ab1397683d1744f0a229daf569823722, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c1db2e4396f14849979d6eb9ccc34122, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/2d9d12df20094935a58e13fd10657be6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/fb1b353e6a4d4dad887015dd7f2f13dd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/76963d32509e41fb9415db3848037898, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/39e87c720f3842d781b8d5d45be84f0b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1a4f47f83608456394e5dda6fabc12dd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/7c43e18769d94a76a9f5996dcd9155f6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/4347cf9344644afaafb32b9cc10a2932, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/bdc36fe753c140dea2b37da2fefcae48] to archive 2024-11-07T17:16:11,130 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T17:16:11,132 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/55c0cb1171174d248753dd1aa0658831 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/55c0cb1171174d248753dd1aa0658831 2024-11-07T17:16:11,133 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/326686d5ea164bc79f9fe4fef6b96e24 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/326686d5ea164bc79f9fe4fef6b96e24 2024-11-07T17:16:11,134 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1e81fb63ede24570bee2e66b66e0036d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1e81fb63ede24570bee2e66b66e0036d 2024-11-07T17:16:11,135 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/a37f6175294345a1bafd58fcaa91f303 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/a37f6175294345a1bafd58fcaa91f303 2024-11-07T17:16:11,136 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/6a76286bcf1840eeb77d0d7d2571bfcf to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/6a76286bcf1840eeb77d0d7d2571bfcf 2024-11-07T17:16:11,138 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/3b7263a622264f539d49a428b69042fa to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/3b7263a622264f539d49a428b69042fa 2024-11-07T17:16:11,139 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/66234dbeb3f54674ae050a531e74b416 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/66234dbeb3f54674ae050a531e74b416 2024-11-07T17:16:11,140 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/71fc3a4d295b4db785152eb71dcb91ad to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/71fc3a4d295b4db785152eb71dcb91ad 2024-11-07T17:16:11,141 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/41749e908c3f40c0a1b939f382998b90 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/41749e908c3f40c0a1b939f382998b90 2024-11-07T17:16:11,142 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/0b55e51d194e4e379db530c94be43be4 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/0b55e51d194e4e379db530c94be43be4 2024-11-07T17:16:11,142 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-07T17:16:11,142 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/beddcab2bb684931ab571793a4e22189 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/beddcab2bb684931ab571793a4e22189 2024-11-07T17:16:11,143 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/de2116fd2f6b447a8b88794bdc5f5d2d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/de2116fd2f6b447a8b88794bdc5f5d2d 2024-11-07T17:16:11,144 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/e857e577f58041929e698f5118e50a6a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/e857e577f58041929e698f5118e50a6a 2024-11-07T17:16:11,145 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/3993fac3a9ce4c6588724044de11f84e to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/3993fac3a9ce4c6588724044de11f84e 2024-11-07T17:16:11,146 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/269eab1018574de690af2b3f231d6e0c to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/269eab1018574de690af2b3f231d6e0c 2024-11-07T17:16:11,147 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/2bc5b62180c349b99ee5ea70b63915be to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/2bc5b62180c349b99ee5ea70b63915be 2024-11-07T17:16:11,148 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/38fe0dc3b17247c2942933e14677ea37 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/38fe0dc3b17247c2942933e14677ea37 2024-11-07T17:16:11,149 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c24b37f7d011471a877d99c840792f40 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c24b37f7d011471a877d99c840792f40 2024-11-07T17:16:11,150 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1c7331470515444e944fc35febeeed46 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1c7331470515444e944fc35febeeed46 2024-11-07T17:16:11,151 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/ab1397683d1744f0a229daf569823722 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/ab1397683d1744f0a229daf569823722 2024-11-07T17:16:11,153 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c1db2e4396f14849979d6eb9ccc34122 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c1db2e4396f14849979d6eb9ccc34122 2024-11-07T17:16:11,154 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/2d9d12df20094935a58e13fd10657be6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/2d9d12df20094935a58e13fd10657be6 2024-11-07T17:16:11,155 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/fb1b353e6a4d4dad887015dd7f2f13dd to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/fb1b353e6a4d4dad887015dd7f2f13dd 2024-11-07T17:16:11,156 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/76963d32509e41fb9415db3848037898 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/76963d32509e41fb9415db3848037898 2024-11-07T17:16:11,157 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/39e87c720f3842d781b8d5d45be84f0b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/39e87c720f3842d781b8d5d45be84f0b 2024-11-07T17:16:11,158 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1a4f47f83608456394e5dda6fabc12dd to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/1a4f47f83608456394e5dda6fabc12dd 2024-11-07T17:16:11,159 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/7c43e18769d94a76a9f5996dcd9155f6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/7c43e18769d94a76a9f5996dcd9155f6 2024-11-07T17:16:11,159 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/4347cf9344644afaafb32b9cc10a2932 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/4347cf9344644afaafb32b9cc10a2932 2024-11-07T17:16:11,160 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/bdc36fe753c140dea2b37da2fefcae48 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/bdc36fe753c140dea2b37da2fefcae48 2024-11-07T17:16:11,162 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/29b518a8bff1485a81b9d38bf8dbf91a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/7f9f288bcb3c4b86b186c6b0e15eed7c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/3c1117032ecf458da13e28c90591fcaa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/3475c91e8084439aa2322fd803459d60, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/c64b7b6a0b7f4952bd624678e4a072aa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/b0d1964d25bb432984ee16ca95f0cb13, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/089e6f5652e04b2790f74467369739d3, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/b268afec57f041b3bd1af9315a78cdcf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/1947152e3d314ce88947053f34e54cf9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6fad5c4707f84cdc8463a8563d524d5d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6a56629c5ad74dd68e786830ca5d2167, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/aef67027dbd54904a211c1af8ae6ab43, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/bcb0e11ab039489db35ad8ab3f12b429, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/5b057b6b7d1d47728a33b9eecb992426, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/9635665c0e8441a59305d7e7246e2e80, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/726e13cbbc604081b9c7e8a9619f28d4, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/949e367dc2c84146b1cf50fac53d5b45, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/8c803a4a148043cd91c2da3c7f53335a, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/36733d0c449a4ca6a92c33f81677336f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/2ba2d57d6ceb41de99388d29281a0b18, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/0b1ff6b2599242f9804daa63f3d5e7e5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/19f05b4413e641bf8a5fe18897dd87d7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/a3f0c862eadb44c8af92a3e52774ca4d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6c47475b5e9a4ef6b2bf2debc7f0c7a8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/00434188772b462680a296613d0d8fec, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/95b4179cb9fc477cb1d2611cab022dd9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/27a8a9229fc04ed1ae30c422e6fb9dde, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/a77beb83095b4cd6a14a05280f7a30da, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/1fcfa93e1fca48fab310f387c8d6cbac] to archive 2024-11-07T17:16:11,163 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T17:16:11,164 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/29b518a8bff1485a81b9d38bf8dbf91a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/29b518a8bff1485a81b9d38bf8dbf91a 2024-11-07T17:16:11,165 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/7f9f288bcb3c4b86b186c6b0e15eed7c to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/7f9f288bcb3c4b86b186c6b0e15eed7c 2024-11-07T17:16:11,166 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/3c1117032ecf458da13e28c90591fcaa to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/3c1117032ecf458da13e28c90591fcaa 2024-11-07T17:16:11,167 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/3475c91e8084439aa2322fd803459d60 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/3475c91e8084439aa2322fd803459d60 2024-11-07T17:16:11,167 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/c64b7b6a0b7f4952bd624678e4a072aa to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/c64b7b6a0b7f4952bd624678e4a072aa 2024-11-07T17:16:11,168 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/b0d1964d25bb432984ee16ca95f0cb13 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/b0d1964d25bb432984ee16ca95f0cb13 2024-11-07T17:16:11,169 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/089e6f5652e04b2790f74467369739d3 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/089e6f5652e04b2790f74467369739d3 2024-11-07T17:16:11,170 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/b268afec57f041b3bd1af9315a78cdcf to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/b268afec57f041b3bd1af9315a78cdcf 2024-11-07T17:16:11,171 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/1947152e3d314ce88947053f34e54cf9 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/1947152e3d314ce88947053f34e54cf9 2024-11-07T17:16:11,172 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6fad5c4707f84cdc8463a8563d524d5d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6fad5c4707f84cdc8463a8563d524d5d 2024-11-07T17:16:11,173 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6a56629c5ad74dd68e786830ca5d2167 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6a56629c5ad74dd68e786830ca5d2167 2024-11-07T17:16:11,174 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/aef67027dbd54904a211c1af8ae6ab43 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/aef67027dbd54904a211c1af8ae6ab43 2024-11-07T17:16:11,175 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/bcb0e11ab039489db35ad8ab3f12b429 to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/bcb0e11ab039489db35ad8ab3f12b429 2024-11-07T17:16:11,175 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/5b057b6b7d1d47728a33b9eecb992426 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/5b057b6b7d1d47728a33b9eecb992426 2024-11-07T17:16:11,176 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/9635665c0e8441a59305d7e7246e2e80 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/9635665c0e8441a59305d7e7246e2e80 2024-11-07T17:16:11,177 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/726e13cbbc604081b9c7e8a9619f28d4 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/726e13cbbc604081b9c7e8a9619f28d4 2024-11-07T17:16:11,178 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/949e367dc2c84146b1cf50fac53d5b45 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/949e367dc2c84146b1cf50fac53d5b45 2024-11-07T17:16:11,179 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/8c803a4a148043cd91c2da3c7f53335a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/8c803a4a148043cd91c2da3c7f53335a 2024-11-07T17:16:11,180 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/36733d0c449a4ca6a92c33f81677336f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/36733d0c449a4ca6a92c33f81677336f 2024-11-07T17:16:11,181 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/2ba2d57d6ceb41de99388d29281a0b18 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/2ba2d57d6ceb41de99388d29281a0b18 2024-11-07T17:16:11,182 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/0b1ff6b2599242f9804daa63f3d5e7e5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/0b1ff6b2599242f9804daa63f3d5e7e5 2024-11-07T17:16:11,182 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/19f05b4413e641bf8a5fe18897dd87d7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/19f05b4413e641bf8a5fe18897dd87d7 2024-11-07T17:16:11,183 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/a3f0c862eadb44c8af92a3e52774ca4d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/a3f0c862eadb44c8af92a3e52774ca4d 2024-11-07T17:16:11,184 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6c47475b5e9a4ef6b2bf2debc7f0c7a8 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/6c47475b5e9a4ef6b2bf2debc7f0c7a8 2024-11-07T17:16:11,185 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/00434188772b462680a296613d0d8fec to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/00434188772b462680a296613d0d8fec 2024-11-07T17:16:11,186 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/95b4179cb9fc477cb1d2611cab022dd9 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/95b4179cb9fc477cb1d2611cab022dd9 2024-11-07T17:16:11,187 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/27a8a9229fc04ed1ae30c422e6fb9dde to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/27a8a9229fc04ed1ae30c422e6fb9dde 2024-11-07T17:16:11,188 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/a77beb83095b4cd6a14a05280f7a30da to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/a77beb83095b4cd6a14a05280f7a30da 2024-11-07T17:16:11,189 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/1fcfa93e1fca48fab310f387c8d6cbac to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/1fcfa93e1fca48fab310f387c8d6cbac 2024-11-07T17:16:11,190 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9c4a9596311e4314b5fe1921a0fbe3a7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/d98dd9a0c7944694aae18e88129b8804, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/33fc364a51e9486a85d46452273dae8d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/c1dcdc7795bb49f09c65d9cb67d19f13, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/bd4d8b4587714c3bae3ce10262e8a385, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a0665bc4473c4d8594f19dafd2232ed1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/f202d7b991514def8aa6ed0291bacce5, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/beca7f2e646d417fa62fd4be4502a038, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/79aab6ed2700480cb690ba291058772a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/f39115a3a32a4bcbaabd123ef5f5691e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/d550d8acdb4b49c7b9db1928dcdfa00a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/bff86eb3eccf493ab798522fe1d1a174, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/5e3014ad02e64b54b8485f97b575a178, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9cb530ea73ca45598890b45c54c7c69f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/ae131beb3d8a4ba78e4869567c3fada4, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/4afb2a657be74746b1109a355a8e88e0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/583bb4ede50147ee9c7107841c8fb93c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/3261530fb680442fa5db93639151c7fd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/2fd1275978844d7fbfeba67a99855afa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/78c2c00160f0488e88bd8a95f247a688, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a524e0ed248e45d5acb49968a125ccc5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a95a53ecf35c43fe8d5f0e1b944d5968, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/8765882a9a6446e3901b489de3db1dea, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/2e313d73bdf4426fad5b59650f447a19, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/1448a22f182040d0bec1376d5d1bc68f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/604fc1a191de4d93b42a68ce9558c099, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9904d232bbc844d09a08edb2a7df1108, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/45e136b56cd24fc0b704707a4b327e9c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/7cda24e1a19c4adcaa58ee68bbeee986] to archive 2024-11-07T17:16:11,191 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T17:16:11,192 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9c4a9596311e4314b5fe1921a0fbe3a7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9c4a9596311e4314b5fe1921a0fbe3a7 2024-11-07T17:16:11,193 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/d98dd9a0c7944694aae18e88129b8804 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/d98dd9a0c7944694aae18e88129b8804 2024-11-07T17:16:11,194 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/33fc364a51e9486a85d46452273dae8d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/33fc364a51e9486a85d46452273dae8d 2024-11-07T17:16:11,195 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/c1dcdc7795bb49f09c65d9cb67d19f13 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/c1dcdc7795bb49f09c65d9cb67d19f13 2024-11-07T17:16:11,196 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/bd4d8b4587714c3bae3ce10262e8a385 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/bd4d8b4587714c3bae3ce10262e8a385 2024-11-07T17:16:11,197 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a0665bc4473c4d8594f19dafd2232ed1 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a0665bc4473c4d8594f19dafd2232ed1 2024-11-07T17:16:11,198 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/f202d7b991514def8aa6ed0291bacce5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/f202d7b991514def8aa6ed0291bacce5 2024-11-07T17:16:11,199 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/beca7f2e646d417fa62fd4be4502a038 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/beca7f2e646d417fa62fd4be4502a038 2024-11-07T17:16:11,199 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/79aab6ed2700480cb690ba291058772a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/79aab6ed2700480cb690ba291058772a 2024-11-07T17:16:11,201 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/f39115a3a32a4bcbaabd123ef5f5691e to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/f39115a3a32a4bcbaabd123ef5f5691e 2024-11-07T17:16:11,202 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/d550d8acdb4b49c7b9db1928dcdfa00a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/d550d8acdb4b49c7b9db1928dcdfa00a 2024-11-07T17:16:11,203 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/bff86eb3eccf493ab798522fe1d1a174 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/bff86eb3eccf493ab798522fe1d1a174 2024-11-07T17:16:11,203 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/5e3014ad02e64b54b8485f97b575a178 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/5e3014ad02e64b54b8485f97b575a178 2024-11-07T17:16:11,205 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9cb530ea73ca45598890b45c54c7c69f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9cb530ea73ca45598890b45c54c7c69f 2024-11-07T17:16:11,205 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/ae131beb3d8a4ba78e4869567c3fada4 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/ae131beb3d8a4ba78e4869567c3fada4 2024-11-07T17:16:11,206 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/4afb2a657be74746b1109a355a8e88e0 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/4afb2a657be74746b1109a355a8e88e0 2024-11-07T17:16:11,207 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/583bb4ede50147ee9c7107841c8fb93c to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/583bb4ede50147ee9c7107841c8fb93c 2024-11-07T17:16:11,208 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/3261530fb680442fa5db93639151c7fd to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/3261530fb680442fa5db93639151c7fd 2024-11-07T17:16:11,209 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/2fd1275978844d7fbfeba67a99855afa to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/2fd1275978844d7fbfeba67a99855afa 2024-11-07T17:16:11,210 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/78c2c00160f0488e88bd8a95f247a688 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/78c2c00160f0488e88bd8a95f247a688 2024-11-07T17:16:11,211 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a524e0ed248e45d5acb49968a125ccc5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a524e0ed248e45d5acb49968a125ccc5 2024-11-07T17:16:11,212 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a95a53ecf35c43fe8d5f0e1b944d5968 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a95a53ecf35c43fe8d5f0e1b944d5968 2024-11-07T17:16:11,213 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/8765882a9a6446e3901b489de3db1dea to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/8765882a9a6446e3901b489de3db1dea 2024-11-07T17:16:11,214 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/2e313d73bdf4426fad5b59650f447a19 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/2e313d73bdf4426fad5b59650f447a19 2024-11-07T17:16:11,215 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/1448a22f182040d0bec1376d5d1bc68f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/1448a22f182040d0bec1376d5d1bc68f 2024-11-07T17:16:11,216 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/604fc1a191de4d93b42a68ce9558c099 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/604fc1a191de4d93b42a68ce9558c099 2024-11-07T17:16:11,216 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9904d232bbc844d09a08edb2a7df1108 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/9904d232bbc844d09a08edb2a7df1108 2024-11-07T17:16:11,217 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/45e136b56cd24fc0b704707a4b327e9c to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/45e136b56cd24fc0b704707a4b327e9c 2024-11-07T17:16:11,218 DEBUG [StoreCloser-TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/7cda24e1a19c4adcaa58ee68bbeee986 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/7cda24e1a19c4adcaa58ee68bbeee986 2024-11-07T17:16:11,223 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/recovered.edits/431.seqid, newMaxSeqId=431, maxSeqId=4 2024-11-07T17:16:11,224 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6. 
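[annotation] The HFileArchiver entries above all follow one pattern: each store file is moved from the region's data directory to a mirrored path under the cluster archive directory, keeping the namespace/table/region/family layout intact. The following is a minimal sketch of that path mirroring only, not HBase's actual HFileArchiver code; the helper name and the assumption that the archive root is simply "<rootDir>/archive/data" are illustrative, taken from the source/destination pairs printed in this log.

    import java.net.URI;

    // Sketch: re-root a store file's table-relative path from the data dir onto the archive dir.
    public class ArchivePathSketch {
        static URI toArchivePath(URI rootDir, URI storeFile) {
            String root = rootDir.toString();
            String dataPrefix = root + "/data/";
            String filePath = storeFile.toString();
            if (!filePath.startsWith(dataPrefix)) {
                throw new IllegalArgumentException("not under the data dir: " + storeFile);
            }
            // e.g. default/TestAcidGuarantees/<region>/C/<hfile>
            String relative = filePath.substring(dataPrefix.length());
            return URI.create(root + "/archive/data/" + relative);
        }

        public static void main(String[] args) {
            URI root = URI.create("hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17");
            URI src = URI.create(root + "/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/a0665bc4473c4d8594f19dafd2232ed1");
            // Prints the same archive location the log entry above reports for this file.
            System.out.println(toArchivePath(root, src));
        }
    }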
2024-11-07T17:16:11,224 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1635): Region close journal for 5c144e9b474530f0a58afb1f68827ff6: 2024-11-07T17:16:11,225 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] handler.UnassignRegionHandler(170): Closed 5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,226 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=5c144e9b474530f0a58afb1f68827ff6, regionState=CLOSED 2024-11-07T17:16:11,228 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-07T17:16:11,228 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; CloseRegionProcedure 5c144e9b474530f0a58afb1f68827ff6, server=3a0fde618c86,37403,1730999712734 in 1.9090 sec 2024-11-07T17:16:11,229 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=66 2024-11-07T17:16:11,229 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=66, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5c144e9b474530f0a58afb1f68827ff6, UNASSIGN in 1.9130 sec 2024-11-07T17:16:11,230 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-07T17:16:11,230 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9150 sec 2024-11-07T17:16:11,231 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999771231"}]},"ts":"1730999771231"} 2024-11-07T17:16:11,232 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-07T17:16:11,234 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-07T17:16:11,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9280 sec 2024-11-07T17:16:11,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-07T17:16:11,414 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-11-07T17:16:11,414 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-07T17:16:11,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:11,416 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=69, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:11,416 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=69, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:11,417 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-07T17:16:11,419 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,421 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/recovered.edits] 2024-11-07T17:16:11,423 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/246e321adf044cb686e9020ac92ebfb2 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/246e321adf044cb686e9020ac92ebfb2 2024-11-07T17:16:11,424 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c18853e67dce46cbb9cbdddbc9ec8da7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/A/c18853e67dce46cbb9cbdddbc9ec8da7 2024-11-07T17:16:11,426 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/0089f4db72914798bab376acc0cde7b1 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/0089f4db72914798bab376acc0cde7b1 2024-11-07T17:16:11,428 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/e8d421d77b854a158869dd4eb065a42b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/B/e8d421d77b854a158869dd4eb065a42b 2024-11-07T17:16:11,430 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/06ca54523bfe4f8dae83e234ee028f75 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/06ca54523bfe4f8dae83e234ee028f75 
2024-11-07T17:16:11,431 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/dd4150c826ca462180d375ad3f1cb8e2 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/C/dd4150c826ca462180d375ad3f1cb8e2 2024-11-07T17:16:11,433 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/recovered.edits/431.seqid to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6/recovered.edits/431.seqid 2024-11-07T17:16:11,434 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,434 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-07T17:16:11,435 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-07T17:16:11,436 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-07T17:16:11,439 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411070b6998433e3f4cf6aac4082cb25ac07e_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411070b6998433e3f4cf6aac4082cb25ac07e_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,440 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411070bc80e8a6d1a41a98e6ea3a3bd089e9e_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411070bc80e8a6d1a41a98e6ea3a3bd089e9e_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,441 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411070c8c4a891c064d5c90de8e663fbc9032_5c144e9b474530f0a58afb1f68827ff6 to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411070c8c4a891c064d5c90de8e663fbc9032_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,442 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110721dd706ba8c345f49ecbf0f4c89720fd_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110721dd706ba8c345f49ecbf0f4c89720fd_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,443 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110748b3f01cc7f2411d8b693ff34aedd57b_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110748b3f01cc7f2411d8b693ff34aedd57b_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,445 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075d4a88b5067a48adb603691e3ce64b53_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075d4a88b5067a48adb603691e3ce64b53_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,446 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107604579dd391d4c0ca9cf5f3d3c0fc0e6_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107604579dd391d4c0ca9cf5f3d3c0fc0e6_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,447 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110760ff6d2f1c2f49af9a086dc4c9ed5e34_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110760ff6d2f1c2f49af9a086dc4c9ed5e34_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,448 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107631ede8b07b340da94e4324bd886f8a2_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107631ede8b07b340da94e4324bd886f8a2_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,449 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411077dea312be5e540e3aba16a6e5eb19385_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411077dea312be5e540e3aba16a6e5eb19385_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,450 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078f8382cd7d054c73b4ecfd10bdfb1f3e_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078f8382cd7d054c73b4ecfd10bdfb1f3e_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,451 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107953f59ce936c494bbde59b44143767b1_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107953f59ce936c494bbde59b44143767b1_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,452 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411079f592a41421f48d890a5e6aebfd36661_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411079f592a41421f48d890a5e6aebfd36661_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,453 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107a6190a4125984f9ba32c60fdc6825fb5_5c144e9b474530f0a58afb1f68827ff6 to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107a6190a4125984f9ba32c60fdc6825fb5_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,454 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107a6a1cbb17c4343859c9127e933a47d86_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107a6a1cbb17c4343859c9127e933a47d86_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,455 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107b787509e96e9400ca8c1c7fe7dab06ab_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107b787509e96e9400ca8c1c7fe7dab06ab_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,456 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c1c99f6f416e44ccb5601b0d1d7e9bc1_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c1c99f6f416e44ccb5601b0d1d7e9bc1_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,458 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107d5f3e3c91bcd47218a14971ca9ee5d43_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107d5f3e3c91bcd47218a14971ca9ee5d43_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,459 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ddf7ba7ada1347f89d5af701daab965f_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ddf7ba7ada1347f89d5af701daab965f_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,460 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107f1951afce9ab4ef9936836ba6a83bdab_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107f1951afce9ab4ef9936836ba6a83bdab_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,461 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fc99fdc4272146acb1caa95f016f6e8e_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fc99fdc4272146acb1caa95f016f6e8e_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,462 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fe5c4b5d39b34a7e8760ced7982d35b1_5c144e9b474530f0a58afb1f68827ff6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fe5c4b5d39b34a7e8760ced7982d35b1_5c144e9b474530f0a58afb1f68827ff6 2024-11-07T17:16:11,462 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-07T17:16:11,464 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=69, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:11,467 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-07T17:16:11,469 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-07T17:16:11,470 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=69, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:11,470 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
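[annotation] The disable-then-delete sequence recorded above (DisableTableProcedure pid=65, then DeleteTableProcedure pid=69 archiving the region and mob directories before cleaning hbase:meta) is what the blocking Admin API produces on the client side. A minimal client-side sketch, assuming a standard HBase 2.x client with the cluster configuration on the classpath; the actual test code that issued these RPCs is not part of this log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch of the client calls that trigger the DISABLE/DELETE procedures seen in the log.
    public class DropTableSketch {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);   // corresponds to the DisableTableProcedure above
                }
                admin.deleteTable(table);        // corresponds to the DeleteTableProcedure above
            }
        }
    }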
2024-11-07T17:16:11,470 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730999771470"}]},"ts":"9223372036854775807"} 2024-11-07T17:16:11,471 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-07T17:16:11,471 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 5c144e9b474530f0a58afb1f68827ff6, NAME => 'TestAcidGuarantees,,1730999743144.5c144e9b474530f0a58afb1f68827ff6.', STARTKEY => '', ENDKEY => ''}] 2024-11-07T17:16:11,471 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-07T17:16:11,472 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730999771471"}]},"ts":"9223372036854775807"} 2024-11-07T17:16:11,473 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-07T17:16:11,475 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=69, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:11,476 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 61 msec 2024-11-07T17:16:11,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-07T17:16:11,518 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-07T17:16:11,528 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=242 (was 239) Potentially hanging thread: hconnection-0x3b59e39d-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3b59e39d-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3b59e39d-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3b59e39d-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1998448365_22 at /127.0.0.1:54352 [Waiting for operation #252] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1742492692_22 at /127.0.0.1:55496 [Waiting for operation #399] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1998448365_22 at /127.0.0.1:41034 [Waiting for operation #610] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1742492692_22 at /127.0.0.1:54336 [Waiting for operation #252] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/cluster_5c1c1720-808f-e015-ddc1-fd2a6dabdfc3/dfs/data/data1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/cluster_5c1c1720-808f-e015-ddc1-fd2a6dabdfc3/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=462 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=407 (was 356) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2925 (was 3215) 2024-11-07T17:16:11,537 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=242, OpenFileDescriptor=462, MaxFileDescriptor=1048576, SystemLoadAverage=407, ProcessCount=11, AvailableMemoryMB=2925 2024-11-07T17:16:11,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
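[annotation] The ResourceChecker lines above compare per-test snapshots of thread count, open file descriptors, system load average and available memory (Thread=242 was 239, OpenFileDescriptor=462 was 457, and so on) to flag possible leaks. A rough sketch of how such a snapshot can be taken with standard JMX beans; this illustrates the measured quantities and is not the ResourceChecker implementation itself, and the open-descriptor count is only available on Unix-like JVMs.

    import java.lang.management.ManagementFactory;
    import java.lang.management.OperatingSystemMXBean;
    import java.lang.management.ThreadMXBean;

    // Snapshot the same kinds of resources the ResourceChecker log lines report.
    public class ResourceSnapshotSketch {
        public static void main(String[] args) {
            ThreadMXBean threads = ManagementFactory.getThreadMXBean();
            OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();

            System.out.println("Thread=" + threads.getThreadCount());
            System.out.println("SystemLoadAverage=" + (int) os.getSystemLoadAverage());
            System.out.println("AvailableMemoryMB=" + Runtime.getRuntime().freeMemory() / (1024 * 1024));

            // Open file descriptors are exposed only via the com.sun extension on Unix-like JVMs.
            if (os instanceof com.sun.management.UnixOperatingSystemMXBean) {
                long fds = ((com.sun.management.UnixOperatingSystemMXBean) os).getOpenFileDescriptorCount();
                System.out.println("OpenFileDescriptor=" + fds);
            }
        }
    }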
2024-11-07T17:16:11,539 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T17:16:11,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=70, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:11,540 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T17:16:11,541 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:11,541 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 70 2024-11-07T17:16:11,541 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T17:16:11,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-07T17:16:11,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742089_1265 (size=960) 2024-11-07T17:16:11,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-07T17:16:11,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-07T17:16:11,949 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17 2024-11-07T17:16:11,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742090_1266 (size=53) 2024-11-07T17:16:12,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-07T17:16:12,355 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:16:12,355 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 9c87afdeea8af3233cd3eafc720d61a6, disabling compactions & flushes 2024-11-07T17:16:12,355 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:12,355 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:12,355 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. after waiting 0 ms 2024-11-07T17:16:12,355 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:12,355 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
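[annotation] The table descriptor logged for this create request (families A, B and C, one version each, and the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC') corresponds to a client-side definition along the following lines. A hedged sketch using the HBase 2.x builder API; column-family options not visible in the log are left at their defaults, and this is not the test harness's actual code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch of a create-table request matching the descriptor in the log:
    // three families A/B/C, MAX_VERSIONS=1, BASIC in-memory compaction.
    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            TableDescriptorBuilder table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
            for (String family : new String[] {"A", "B", "C"}) {
                table.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)
                    .build());
            }
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.createTable(table.build());   // produces a CreateTableProcedure like pid=70 above
            }
        }
    }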
2024-11-07T17:16:12,355 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:12,356 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T17:16:12,357 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1730999772356"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730999772356"}]},"ts":"1730999772356"} 2024-11-07T17:16:12,358 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-07T17:16:12,358 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T17:16:12,359 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999772358"}]},"ts":"1730999772358"} 2024-11-07T17:16:12,359 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-07T17:16:12,365 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9c87afdeea8af3233cd3eafc720d61a6, ASSIGN}] 2024-11-07T17:16:12,366 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9c87afdeea8af3233cd3eafc720d61a6, ASSIGN 2024-11-07T17:16:12,367 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9c87afdeea8af3233cd3eafc720d61a6, ASSIGN; state=OFFLINE, location=3a0fde618c86,37403,1730999712734; forceNewPlan=false, retain=false 2024-11-07T17:16:12,501 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-07T17:16:12,517 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=71 updating hbase:meta row=9c87afdeea8af3233cd3eafc720d61a6, regionState=OPENING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:12,518 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; OpenRegionProcedure 9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:16:12,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-07T17:16:12,670 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:12,673 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] handler.AssignRegionHandler(135): 
Open TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:12,673 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(7285): Opening region: {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} 2024-11-07T17:16:12,673 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:12,673 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:16:12,673 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(7327): checking encryption for 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:12,674 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(7330): checking classloading for 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:12,675 INFO [StoreOpener-9c87afdeea8af3233cd3eafc720d61a6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:12,676 INFO [StoreOpener-9c87afdeea8af3233cd3eafc720d61a6-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:16:12,676 INFO [StoreOpener-9c87afdeea8af3233cd3eafc720d61a6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9c87afdeea8af3233cd3eafc720d61a6 columnFamilyName A 2024-11-07T17:16:12,676 DEBUG [StoreOpener-9c87afdeea8af3233cd3eafc720d61a6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:12,676 INFO [StoreOpener-9c87afdeea8af3233cd3eafc720d61a6-1 {}] regionserver.HStore(327): Store=9c87afdeea8af3233cd3eafc720d61a6/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:16:12,677 INFO [StoreOpener-9c87afdeea8af3233cd3eafc720d61a6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:12,677 INFO [StoreOpener-9c87afdeea8af3233cd3eafc720d61a6-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:16:12,678 INFO [StoreOpener-9c87afdeea8af3233cd3eafc720d61a6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9c87afdeea8af3233cd3eafc720d61a6 columnFamilyName B 2024-11-07T17:16:12,678 DEBUG [StoreOpener-9c87afdeea8af3233cd3eafc720d61a6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:12,678 INFO [StoreOpener-9c87afdeea8af3233cd3eafc720d61a6-1 {}] regionserver.HStore(327): Store=9c87afdeea8af3233cd3eafc720d61a6/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:16:12,678 INFO [StoreOpener-9c87afdeea8af3233cd3eafc720d61a6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:12,679 INFO [StoreOpener-9c87afdeea8af3233cd3eafc720d61a6-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:16:12,679 INFO [StoreOpener-9c87afdeea8af3233cd3eafc720d61a6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9c87afdeea8af3233cd3eafc720d61a6 columnFamilyName C 2024-11-07T17:16:12,679 DEBUG [StoreOpener-9c87afdeea8af3233cd3eafc720d61a6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:12,679 INFO [StoreOpener-9c87afdeea8af3233cd3eafc720d61a6-1 {}] regionserver.HStore(327): 
Store=9c87afdeea8af3233cd3eafc720d61a6/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:16:12,680 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:12,680 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:12,681 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:12,682 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T17:16:12,683 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1085): writing seq id for 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:12,685 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T17:16:12,685 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1102): Opened 9c87afdeea8af3233cd3eafc720d61a6; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72159608, jitterRate=0.07526195049285889}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T17:16:12,686 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1001): Region open journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:12,686 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., pid=72, masterSystemTime=1730999772670 2024-11-07T17:16:12,688 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:12,688 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:12,688 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=71 updating hbase:meta row=9c87afdeea8af3233cd3eafc720d61a6, regionState=OPEN, openSeqNum=2, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:12,690 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-07T17:16:12,690 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; OpenRegionProcedure 9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 in 171 msec 2024-11-07T17:16:12,691 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=71, resume processing ppid=70 2024-11-07T17:16:12,691 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, ppid=70, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9c87afdeea8af3233cd3eafc720d61a6, ASSIGN in 325 msec 2024-11-07T17:16:12,692 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T17:16:12,692 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999772692"}]},"ts":"1730999772692"} 2024-11-07T17:16:12,693 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-07T17:16:12,695 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T17:16:12,696 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1560 sec 2024-11-07T17:16:13,152 ERROR [LeaseRenewer:jenkins@localhost:39903 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:39903,5,PEWorkerGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:13,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-07T17:16:13,645 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 70 completed 2024-11-07T17:16:13,647 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x44645c55 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@669e1999 2024-11-07T17:16:13,650 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6862e3ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:13,652 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:13,653 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41356, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:13,654 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T17:16:13,655 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36254, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T17:16:13,657 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64ee0130 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72aa9ee5 2024-11-07T17:16:13,661 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d296fed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:13,662 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683b64c3 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ec09297 2024-11-07T17:16:13,665 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8d0caa5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:13,666 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x07e55eb7 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4dfb20f6 2024-11-07T17:16:13,669 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43f04e0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:13,670 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x03a703d2 to 
127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17cf7fc0 2024-11-07T17:16:13,672 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@560ec309, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:13,673 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14ed1e44 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78b04266 2024-11-07T17:16:13,676 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5886c0f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:13,677 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x088aa519 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@66e575aa 2024-11-07T17:16:13,679 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a0e9c8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:13,680 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e998dd3 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@131ceb8f 2024-11-07T17:16:13,682 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d68f787, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:13,683 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e4c79b8 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a78bf6d 2024-11-07T17:16:13,685 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10e6bf6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:13,687 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d1403c3 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@328852db 2024-11-07T17:16:13,689 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1730a60f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:13,690 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x3bf0ba59 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4b9e2976 2024-11-07T17:16:13,692 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@598cfed4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:13,696 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:13,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-07T17:16:13,697 DEBUG [hconnection-0x25cfb477-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:13,697 DEBUG [hconnection-0x20295484-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:13,697 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:13,697 DEBUG [hconnection-0x666c232f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:13,698 DEBUG [hconnection-0x7883cb07-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:13,698 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41366, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:13,698 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41364, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:13,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-07T17:16:13,698 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:13,698 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:13,699 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41386, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:13,699 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41388, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:13,702 DEBUG [hconnection-0x221c9095-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-07T17:16:13,702 DEBUG [hconnection-0x2acaf34f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:13,702 DEBUG [hconnection-0x1a57398b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:13,703 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41390, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:13,703 DEBUG [hconnection-0x210b1dab-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:13,703 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41410, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:13,703 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41394, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:13,703 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41412, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:13,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:13,707 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T17:16:13,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:13,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:13,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:13,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:13,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:13,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:13,724 DEBUG [hconnection-0x3f9b203a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:13,725 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41418, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:13,733 DEBUG [hconnection-0x32afd8ba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:13,734 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41426, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:13,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/4493a26b8e3e4c47b5973b98fe94bdf7 is 50, key is test_row_0/A:col10/1730999773707/Put/seqid=0 2024-11-07T17:16:13,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742091_1267 (size=12001) 2024-11-07T17:16:13,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:13,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999833772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:13,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:13,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999833774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:13,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:13,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999833774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:13,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:13,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999833776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:13,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:13,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999833775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:13,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-07T17:16:13,850 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:13,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-07T17:16:13,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:13,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:13,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:13,852 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:13,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:13,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:13,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:13,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999833882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:13,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:13,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999833883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:13,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:13,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999833883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:13,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:13,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999833883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:13,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:13,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999833884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-07T17:16:14,004 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-07T17:16:14,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:14,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:14,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:14,005 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:14,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:14,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:14,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:14,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999834086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:14,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999834086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:14,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999834086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:14,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999834087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:14,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999834087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,158 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/4493a26b8e3e4c47b5973b98fe94bdf7 2024-11-07T17:16:14,160 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-07T17:16:14,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:14,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:14,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:14,160 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:14,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:14,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:14,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/a3d5cec1e1d54196bb7842ac2dd57344 is 50, key is test_row_0/B:col10/1730999773707/Put/seqid=0 2024-11-07T17:16:14,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742092_1268 (size=12001) 2024-11-07T17:16:14,191 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/a3d5cec1e1d54196bb7842ac2dd57344 2024-11-07T17:16:14,218 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/94fad211cd2e43d8b5f5074371a6ecad is 50, key is test_row_0/C:col10/1730999773707/Put/seqid=0 2024-11-07T17:16:14,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742093_1269 (size=12001) 2024-11-07T17:16:14,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-07T17:16:14,312 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,312 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-07T17:16:14,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:14,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:14,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:14,313 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:14,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:14,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:14,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:14,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999834390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:14,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999834390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:14,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999834390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:14,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999834391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:14,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999834391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,465 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,465 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-07T17:16:14,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:14,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:14,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:14,466 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:14,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:14,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:14,618 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-07T17:16:14,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:14,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:14,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:14,619 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:14,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:14,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:14,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/94fad211cd2e43d8b5f5074371a6ecad 2024-11-07T17:16:14,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/4493a26b8e3e4c47b5973b98fe94bdf7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4493a26b8e3e4c47b5973b98fe94bdf7 2024-11-07T17:16:14,640 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4493a26b8e3e4c47b5973b98fe94bdf7, entries=150, sequenceid=14, filesize=11.7 K 2024-11-07T17:16:14,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/a3d5cec1e1d54196bb7842ac2dd57344 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a3d5cec1e1d54196bb7842ac2dd57344 2024-11-07T17:16:14,645 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a3d5cec1e1d54196bb7842ac2dd57344, entries=150, sequenceid=14, 
filesize=11.7 K 2024-11-07T17:16:14,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/94fad211cd2e43d8b5f5074371a6ecad as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/94fad211cd2e43d8b5f5074371a6ecad 2024-11-07T17:16:14,651 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/94fad211cd2e43d8b5f5074371a6ecad, entries=150, sequenceid=14, filesize=11.7 K 2024-11-07T17:16:14,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 9c87afdeea8af3233cd3eafc720d61a6 in 945ms, sequenceid=14, compaction requested=false 2024-11-07T17:16:14,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:14,771 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,772 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-07T17:16:14,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:14,772 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T17:16:14,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:14,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:14,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:14,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:14,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:14,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:14,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/84bef5016e554cffb5c914aa153fa625 is 50, key is test_row_0/A:col10/1730999773772/Put/seqid=0 2024-11-07T17:16:14,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742094_1270 (size=12001) 2024-11-07T17:16:14,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-07T17:16:14,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:14,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:14,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:14,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:14,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999834933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999834933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:14,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:14,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999834934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999834934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:14,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:14,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999834934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:15,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:15,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999835037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999835037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:15,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999835038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:15,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999835038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:15,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999835038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,183 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/84bef5016e554cffb5c914aa153fa625 2024-11-07T17:16:15,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/edeac1a7b2f94e76b10a7bdccbda3abc is 50, key is test_row_0/B:col10/1730999773772/Put/seqid=0 2024-11-07T17:16:15,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742095_1271 (size=12001) 2024-11-07T17:16:15,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:15,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999835240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:15,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999835240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:15,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999835241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:15,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999835242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:15,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999835242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999835544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:15,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999835544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999835545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999835545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:15,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999835546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,599 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/edeac1a7b2f94e76b10a7bdccbda3abc 2024-11-07T17:16:15,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/9c71c4167e274840a6b50914434c5c32 is 50, key is test_row_0/C:col10/1730999773772/Put/seqid=0 2024-11-07T17:16:15,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742096_1272 (size=12001) 2024-11-07T17:16:15,612 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/9c71c4167e274840a6b50914434c5c32 2024-11-07T17:16:15,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/84bef5016e554cffb5c914aa153fa625 as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/84bef5016e554cffb5c914aa153fa625 2024-11-07T17:16:15,620 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/84bef5016e554cffb5c914aa153fa625, entries=150, sequenceid=37, filesize=11.7 K 2024-11-07T17:16:15,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/edeac1a7b2f94e76b10a7bdccbda3abc as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/edeac1a7b2f94e76b10a7bdccbda3abc 2024-11-07T17:16:15,625 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/edeac1a7b2f94e76b10a7bdccbda3abc, entries=150, sequenceid=37, filesize=11.7 K 2024-11-07T17:16:15,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/9c71c4167e274840a6b50914434c5c32 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/9c71c4167e274840a6b50914434c5c32 2024-11-07T17:16:15,629 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/9c71c4167e274840a6b50914434c5c32, entries=150, sequenceid=37, filesize=11.7 K 2024-11-07T17:16:15,630 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=80.51 KB/82440 for 9c87afdeea8af3233cd3eafc720d61a6 in 858ms, sequenceid=37, compaction requested=false 2024-11-07T17:16:15,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:15,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
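Note on the RegionTooBusyException warnings surrounding this flush: they are raised by HRegion.checkResources() when the region's memstore exceeds its blocking limit (hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier). The 512.0 K limit reported here suggests the test deliberately configures a very small flush size so that writers block until the flush above completes. Below is a minimal client-side sketch of the kind of write that trips this limit; the table name, row key, column families A/B/C and qualifier col10 come from the log above, while the retry settings and the cell value are illustrative assumptions rather than the test's actual configuration.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TooBusyWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Client-side retry knobs; these values are illustrative, not the test's.
        conf.setInt("hbase.client.retries.number", 10);
        conf.setLong("hbase.client.pause", 100L);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row key and family/qualifier names mirror the cells seen in the flush log
          // (test_row_0, stores A/B/C, qualifier col10); the value is a placeholder.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          for (String family : new String[] {"A", "B", "C"}) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          }
          try {
            table.put(put);
          } catch (IOException e) {
            // RegionTooBusyException is retried inside the HBase client; it typically
            // reaches the caller (directly or as a cause) only once the retry budget
            // is exhausted, at which point backing off and retrying later is the usual move.
            System.err.println("Write still blocked after retries: " + e.getMessage());
          }
        }
      }
    }

In practice the exceptions logged by ipc.CallRunner above never reach application code on the first attempt; the client's retrying caller absorbs them and only surfaces a failure after hbase.client.retries.number attempts spaced by hbase.client.pause.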
2024-11-07T17:16:15,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-07T17:16:15,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-07T17:16:15,633 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-07T17:16:15,633 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9330 sec 2024-11-07T17:16:15,634 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.9370 sec 2024-11-07T17:16:15,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-07T17:16:15,802 INFO [Thread-1226 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-07T17:16:15,804 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:15,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-07T17:16:15,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-07T17:16:15,805 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:15,806 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:15,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:15,869 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:39903 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:39903,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:15,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-07T17:16:15,958 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:15,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-07T17:16:15,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:15,959 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-07T17:16:15,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:15,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:15,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:15,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:15,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:15,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:15,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/2a61f1c7b58c4de9be6dd4f80e91a2f9 is 50, key is test_row_0/A:col10/1730999774932/Put/seqid=0 2024-11-07T17:16:15,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742097_1273 (size=12001) 2024-11-07T17:16:16,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:16,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
as already flushing 2024-11-07T17:16:16,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-07T17:16:16,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999836104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999836104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999836104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999836105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999836108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999836210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999836210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999836210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999836210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999836212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,369 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/2a61f1c7b58c4de9be6dd4f80e91a2f9 2024-11-07T17:16:16,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/121f765cdca747508ae978428ceab6a6 is 50, key is test_row_0/B:col10/1730999774932/Put/seqid=0 2024-11-07T17:16:16,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742098_1274 (size=12001) 2024-11-07T17:16:16,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-07T17:16:16,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999836412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999836412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999836413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999836414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999836414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,667 DEBUG [master/3a0fde618c86:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 3c877d3c3f531453d06f6bdf82c5263b changed from -1.0 to 0.0, refreshing cache 2024-11-07T17:16:16,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999836715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999836716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999836716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999836717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:16,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999836717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:16,747 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-07T17:16:16,782 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/121f765cdca747508ae978428ceab6a6 2024-11-07T17:16:16,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/aa8e005ee65a4f729b4dfabdd5bdba1b is 50, key is test_row_0/C:col10/1730999774932/Put/seqid=0 2024-11-07T17:16:16,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742099_1275 (size=12001) 2024-11-07T17:16:16,798 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/aa8e005ee65a4f729b4dfabdd5bdba1b 2024-11-07T17:16:16,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/2a61f1c7b58c4de9be6dd4f80e91a2f9 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2a61f1c7b58c4de9be6dd4f80e91a2f9 2024-11-07T17:16:16,806 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2a61f1c7b58c4de9be6dd4f80e91a2f9, entries=150, sequenceid=52, filesize=11.7 K 2024-11-07T17:16:16,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/121f765cdca747508ae978428ceab6a6 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/121f765cdca747508ae978428ceab6a6 2024-11-07T17:16:16,811 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/121f765cdca747508ae978428ceab6a6, entries=150, sequenceid=52, filesize=11.7 K 2024-11-07T17:16:16,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/aa8e005ee65a4f729b4dfabdd5bdba1b as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/aa8e005ee65a4f729b4dfabdd5bdba1b 2024-11-07T17:16:16,817 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/aa8e005ee65a4f729b4dfabdd5bdba1b, entries=150, sequenceid=52, filesize=11.7 K 2024-11-07T17:16:16,818 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 9c87afdeea8af3233cd3eafc720d61a6 in 859ms, sequenceid=52, compaction requested=true 2024-11-07T17:16:16,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:16,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:16,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-07T17:16:16,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-07T17:16:16,821 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-07T17:16:16,821 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0140 sec 2024-11-07T17:16:16,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.0180 sec 2024-11-07T17:16:16,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-07T17:16:16,909 INFO [Thread-1226 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-07T17:16:16,910 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:16,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-07T17:16:16,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-07T17:16:16,912 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:16,912 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:16,912 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:17,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-07T17:16:17,064 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,065 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-07T17:16:17,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:17,065 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-07T17:16:17,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:17,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:17,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:17,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:17,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:17,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:17,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/c08233e8a87f47058d0f8054dd1a3144 is 50, key is test_row_0/A:col10/1730999776107/Put/seqid=0 2024-11-07T17:16:17,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742100_1276 (size=12001) 2024-11-07T17:16:17,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-07T17:16:17,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:17,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:17,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999837229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,233 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999837230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999837232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999837233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999837233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999837334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999837335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999837337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999837337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999837337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,476 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/c08233e8a87f47058d0f8054dd1a3144 2024-11-07T17:16:17,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/670e4762aef54ce0bfbc608e4a84a059 is 50, key is test_row_0/B:col10/1730999776107/Put/seqid=0 2024-11-07T17:16:17,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742101_1277 (size=12001) 2024-11-07T17:16:17,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-07T17:16:17,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999837538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999837538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999837540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999837541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999837541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999837840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999837841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999837843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999837843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:17,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999837844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:17,889 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/670e4762aef54ce0bfbc608e4a84a059 2024-11-07T17:16:17,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/c80be1d714c747f986993d1cfaabcfe4 is 50, key is test_row_0/C:col10/1730999776107/Put/seqid=0 2024-11-07T17:16:17,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742102_1278 (size=12001) 2024-11-07T17:16:17,900 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/c80be1d714c747f986993d1cfaabcfe4 2024-11-07T17:16:17,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/c08233e8a87f47058d0f8054dd1a3144 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/c08233e8a87f47058d0f8054dd1a3144 2024-11-07T17:16:17,910 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/c08233e8a87f47058d0f8054dd1a3144, entries=150, sequenceid=73, filesize=11.7 K 2024-11-07T17:16:17,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/670e4762aef54ce0bfbc608e4a84a059 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/670e4762aef54ce0bfbc608e4a84a059 2024-11-07T17:16:17,922 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/670e4762aef54ce0bfbc608e4a84a059, entries=150, sequenceid=73, filesize=11.7 K 2024-11-07T17:16:17,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/c80be1d714c747f986993d1cfaabcfe4 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/c80be1d714c747f986993d1cfaabcfe4 2024-11-07T17:16:17,928 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/c80be1d714c747f986993d1cfaabcfe4, entries=150, sequenceid=73, filesize=11.7 K 2024-11-07T17:16:17,929 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 9c87afdeea8af3233cd3eafc720d61a6 in 864ms, sequenceid=73, compaction requested=true 2024-11-07T17:16:17,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:17,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:17,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-07T17:16:17,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-07T17:16:17,935 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-07T17:16:17,935 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0210 sec 2024-11-07T17:16:17,937 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.0260 sec 2024-11-07T17:16:18,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-07T17:16:18,015 INFO [Thread-1226 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-07T17:16:18,016 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:18,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-07T17:16:18,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-07T17:16:18,017 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:18,018 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:18,018 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:18,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-07T17:16:18,169 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-07T17:16:18,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:18,170 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-07T17:16:18,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:18,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:18,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:18,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:18,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:18,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:18,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/d9505e1549ad42f78bf9c88139fba59b is 50, key is test_row_0/A:col10/1730999777231/Put/seqid=0 2024-11-07T17:16:18,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742103_1279 (size=12001) 2024-11-07T17:16:18,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-07T17:16:18,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:18,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:18,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999838361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999838363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999838364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999838364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999838365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999838466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999838466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,471 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999838468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,471 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999838469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,471 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999838469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,581 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/d9505e1549ad42f78bf9c88139fba59b 2024-11-07T17:16:18,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/cca826f9ea514011a01d87662c0f0e57 is 50, key is test_row_0/B:col10/1730999777231/Put/seqid=0 2024-11-07T17:16:18,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742104_1280 (size=12001) 2024-11-07T17:16:18,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-07T17:16:18,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999838669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999838669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999838671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999838672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999838673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999838972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999838973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,978 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999838975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,978 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999838976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:18,978 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999838976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,011 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/cca826f9ea514011a01d87662c0f0e57 2024-11-07T17:16:19,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/2d270d77430d40d781ed75d74e0a71d8 is 50, key is test_row_0/C:col10/1730999777231/Put/seqid=0 2024-11-07T17:16:19,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742105_1281 (size=12001) 2024-11-07T17:16:19,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-07T17:16:19,423 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/2d270d77430d40d781ed75d74e0a71d8 2024-11-07T17:16:19,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/d9505e1549ad42f78bf9c88139fba59b as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/d9505e1549ad42f78bf9c88139fba59b 2024-11-07T17:16:19,432 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/d9505e1549ad42f78bf9c88139fba59b, entries=150, sequenceid=89, filesize=11.7 K 2024-11-07T17:16:19,433 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/cca826f9ea514011a01d87662c0f0e57 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/cca826f9ea514011a01d87662c0f0e57 2024-11-07T17:16:19,436 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/cca826f9ea514011a01d87662c0f0e57, entries=150, sequenceid=89, filesize=11.7 K 2024-11-07T17:16:19,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/2d270d77430d40d781ed75d74e0a71d8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/2d270d77430d40d781ed75d74e0a71d8 2024-11-07T17:16:19,440 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/2d270d77430d40d781ed75d74e0a71d8, entries=150, sequenceid=89, filesize=11.7 K 2024-11-07T17:16:19,441 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 9c87afdeea8af3233cd3eafc720d61a6 in 1271ms, sequenceid=89, compaction requested=true 2024-11-07T17:16:19,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:19,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:19,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-07T17:16:19,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-07T17:16:19,443 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-07T17:16:19,443 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4240 sec 2024-11-07T17:16:19,445 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.4280 sec 2024-11-07T17:16:19,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:19,479 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-07T17:16:19,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:19,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:19,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:19,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:19,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:19,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:19,484 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/0133bb07e9cb4a01b161b1f02137a87f is 50, key is test_row_0/A:col10/1730999779478/Put/seqid=0 2024-11-07T17:16:19,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742106_1282 (size=14341) 2024-11-07T17:16:19,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:19,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999839488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:19,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999839490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:19,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999839491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:19,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999839492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:19,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999839492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:19,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999839593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:19,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999839595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:19,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999839595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:19,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999839597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:19,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999839598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:19,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999839796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:19,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999839798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:19,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999839799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:19,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999839800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:19,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999839802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:19,889 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=110 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/0133bb07e9cb4a01b161b1f02137a87f 2024-11-07T17:16:19,896 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/194d4bbd3adc42b1b9a78a0666056ed6 is 50, key is test_row_0/B:col10/1730999779478/Put/seqid=0 2024-11-07T17:16:19,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742107_1283 (size=12001) 2024-11-07T17:16:20,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999840100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999840102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999840102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999840105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999840107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-07T17:16:20,122 INFO [Thread-1226 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-07T17:16:20,123 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:20,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-07T17:16:20,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-07T17:16:20,125 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:20,125 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:20,126 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:20,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=81
2024-11-07T17:16:20,277 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734
2024-11-07T17:16:20,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82
2024-11-07T17:16:20,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.
2024-11-07T17:16:20,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing
2024-11-07T17:16:20,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.
2024-11-07T17:16:20,278 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82
java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T17:16:20,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82
java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T17:16:20,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
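[editor note] The "Over memstore limit=512.0 K" figure is the per-region blocking size, which in a stock deployment is the memstore flush size multiplied by the block multiplier; the test run appears to have shrunk it so writers hit the limit quickly. The sketch below shows the two standard settings; the 128 KB and x4 values are illustrative assumptions that happen to reproduce the 512 K limit seen in this log, not values read from the test configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit = flush size * block multiplier.
    // Illustrative values only: 128 KB * 4 = 512 KB, matching the
    // "Over memstore limit=512.0 K" rejections seen above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingSize =
        conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes block once a region's memstore exceeds " + blockingSize + " bytes");
  }
}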
2024-11-07T17:16:20,300 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=110 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/194d4bbd3adc42b1b9a78a0666056ed6 2024-11-07T17:16:20,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/9f31135fbd234a0980023daeaa0b7f88 is 50, key is test_row_0/C:col10/1730999779478/Put/seqid=0 2024-11-07T17:16:20,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742108_1284 (size=12001) 2024-11-07T17:16:20,311 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=110 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/9f31135fbd234a0980023daeaa0b7f88 2024-11-07T17:16:20,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/0133bb07e9cb4a01b161b1f02137a87f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/0133bb07e9cb4a01b161b1f02137a87f 2024-11-07T17:16:20,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/0133bb07e9cb4a01b161b1f02137a87f, entries=200, sequenceid=110, filesize=14.0 K 2024-11-07T17:16:20,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/194d4bbd3adc42b1b9a78a0666056ed6 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/194d4bbd3adc42b1b9a78a0666056ed6 2024-11-07T17:16:20,324 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/194d4bbd3adc42b1b9a78a0666056ed6, entries=150, sequenceid=110, filesize=11.7 K 2024-11-07T17:16:20,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/9f31135fbd234a0980023daeaa0b7f88 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/9f31135fbd234a0980023daeaa0b7f88 2024-11-07T17:16:20,329 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/9f31135fbd234a0980023daeaa0b7f88, entries=150, sequenceid=110, filesize=11.7 K 2024-11-07T17:16:20,330 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 9c87afdeea8af3233cd3eafc720d61a6 in 851ms, sequenceid=110, compaction requested=true 2024-11-07T17:16:20,330 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:20,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:16:20,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:20,330 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-07T17:16:20,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:20,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:20,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:20,330 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-07T17:16:20,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:20,332 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72006 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-07T17:16:20,332 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 74346 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-07T17:16:20,333 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/B is initiating minor compaction (all files) 2024-11-07T17:16:20,333 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/A is initiating minor compaction (all files) 2024-11-07T17:16:20,333 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/B in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
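[editor note] After the flush each store holds six HFiles and the ExploringCompactionPolicy selects all six for a minor compaction. Whether that happens, and how many files may be selected, is governed by the store compaction thresholds; a compaction can also be requested explicitly, similar to what the flusher queues automatically above. A hedged sketch follows; the class name and the threshold values are assumptions, not this test's settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionTuningExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Minor compaction is considered once a store has at least 'min' files
    // and never selects more than 'max' files in one pass (values illustrative).
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Queue a (minor) compaction request for every store of the table.
      admin.compact(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}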
2024-11-07T17:16:20,333 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/A in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:20,333 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a3d5cec1e1d54196bb7842ac2dd57344, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/edeac1a7b2f94e76b10a7bdccbda3abc, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/121f765cdca747508ae978428ceab6a6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/670e4762aef54ce0bfbc608e4a84a059, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/cca826f9ea514011a01d87662c0f0e57, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/194d4bbd3adc42b1b9a78a0666056ed6] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=70.3 K 2024-11-07T17:16:20,333 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4493a26b8e3e4c47b5973b98fe94bdf7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/84bef5016e554cffb5c914aa153fa625, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2a61f1c7b58c4de9be6dd4f80e91a2f9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/c08233e8a87f47058d0f8054dd1a3144, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/d9505e1549ad42f78bf9c88139fba59b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/0133bb07e9cb4a01b161b1f02137a87f] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=72.6 K 2024-11-07T17:16:20,334 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting a3d5cec1e1d54196bb7842ac2dd57344, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1730999773706 2024-11-07T17:16:20,334 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4493a26b8e3e4c47b5973b98fe94bdf7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, 
seqNum=14, earliestPutTs=1730999773706 2024-11-07T17:16:20,334 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting edeac1a7b2f94e76b10a7bdccbda3abc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1730999773725 2024-11-07T17:16:20,334 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84bef5016e554cffb5c914aa153fa625, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1730999773725 2024-11-07T17:16:20,334 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 121f765cdca747508ae978428ceab6a6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1730999774932 2024-11-07T17:16:20,335 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 670e4762aef54ce0bfbc608e4a84a059, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1730999776078 2024-11-07T17:16:20,335 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a61f1c7b58c4de9be6dd4f80e91a2f9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1730999774932 2024-11-07T17:16:20,335 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting c08233e8a87f47058d0f8054dd1a3144, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1730999776078 2024-11-07T17:16:20,335 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting cca826f9ea514011a01d87662c0f0e57, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1730999777227 2024-11-07T17:16:20,336 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9505e1549ad42f78bf9c88139fba59b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1730999777227 2024-11-07T17:16:20,336 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 194d4bbd3adc42b1b9a78a0666056ed6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1730999778362 2024-11-07T17:16:20,336 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0133bb07e9cb4a01b161b1f02137a87f, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1730999778358 2024-11-07T17:16:20,352 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#B#compaction#234 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:20,352 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/52d6aa35cb5f409b8ff9172888da3567 is 50, key is test_row_0/B:col10/1730999779478/Put/seqid=0 2024-11-07T17:16:20,357 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#A#compaction#235 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:20,358 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/2ace928bd4034989986e4935c50778ab is 50, key is test_row_0/A:col10/1730999779478/Put/seqid=0 2024-11-07T17:16:20,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742109_1285 (size=12207) 2024-11-07T17:16:20,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742110_1286 (size=12207) 2024-11-07T17:16:20,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-07T17:16:20,430 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-07T17:16:20,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
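[editor note] The PressureAwareThroughputController lines report each compaction's throughput against a "total limit" of 50.00 MB/second. That limiter is pluggable and its bounds are configurable; the sketch below uses the commonly documented keys, but the class name in the comment and the numeric bounds are assumptions rather than values taken from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Compaction I/O is throttled between a lower and a higher bound depending on
    // memstore pressure; the 50 MB/s "total limit" above is the bound currently in
    // effect. The values below are illustrative.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    // Swapping in an unthrottled controller disables the limit entirely
    // (class name assumed from the shipped implementations):
    // conf.set("hbase.regionserver.throughput.controller",
    //     "org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController");
    System.out.println("Compaction throughput bounds configured (sketch only)");
  }
}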
2024-11-07T17:16:20,431 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-07T17:16:20,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:20,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:20,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:20,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:20,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:20,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:20,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/4b05aa30f3114374a59d6eaa782c7da0 is 50, key is test_row_0/A:col10/1730999779489/Put/seqid=0 2024-11-07T17:16:20,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742111_1287 (size=12001) 2024-11-07T17:16:20,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:20,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:20,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999840623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999840625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999840626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999840627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999840627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-07T17:16:20,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999840728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999840730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999840731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999840733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999840734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,771 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/52d6aa35cb5f409b8ff9172888da3567 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/52d6aa35cb5f409b8ff9172888da3567 2024-11-07T17:16:20,777 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/B of 9c87afdeea8af3233cd3eafc720d61a6 into 52d6aa35cb5f409b8ff9172888da3567(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
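[editor note] Six flush files of roughly 11.7 K each (about 70.3 K total) collapse into a single 11.9 K compacted file because the workload keeps rewriting the same small set of rows, so the merge mostly discards superseded cell versions. The sketch below shows the kind of writer that produces this pattern; the class name and loop bounds are assumptions chosen to mirror the keycount=150 reported per file, not the actual test code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class OverlappingRowWriter {
  private static final byte[][] FAMILIES = {
      Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C")};

  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Repeatedly overwrite the same rows and qualifier in all three families.
      // Each flush then contains largely the same keys, so compacting six small
      // HFiles yields one file of roughly the same size as a single flush.
      for (int round = 0; round < 6; round++) {
        for (int row = 0; row < 150; row++) {
          Put put = new Put(Bytes.toBytes("test_row_" + row));
          for (byte[] family : FAMILIES) {
            put.addColumn(family, Bytes.toBytes("col10"), Bytes.toBytes("v" + round));
          }
          table.put(put);
        }
      }
    }
  }
}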
2024-11-07T17:16:20,777 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:20,777 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/B, priority=10, startTime=1730999780330; duration=0sec 2024-11-07T17:16:20,777 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:20,777 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:B 2024-11-07T17:16:20,777 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-07T17:16:20,779 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72006 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-07T17:16:20,779 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/C is initiating minor compaction (all files) 2024-11-07T17:16:20,779 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/C in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:20,779 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/94fad211cd2e43d8b5f5074371a6ecad, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/9c71c4167e274840a6b50914434c5c32, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/aa8e005ee65a4f729b4dfabdd5bdba1b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/c80be1d714c747f986993d1cfaabcfe4, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/2d270d77430d40d781ed75d74e0a71d8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/9f31135fbd234a0980023daeaa0b7f88] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=70.3 K 2024-11-07T17:16:20,779 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 94fad211cd2e43d8b5f5074371a6ecad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1730999773706 2024-11-07T17:16:20,780 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): 
Compacting 9c71c4167e274840a6b50914434c5c32, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1730999773725 2024-11-07T17:16:20,780 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting aa8e005ee65a4f729b4dfabdd5bdba1b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1730999774932 2024-11-07T17:16:20,780 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting c80be1d714c747f986993d1cfaabcfe4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1730999776078 2024-11-07T17:16:20,781 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d270d77430d40d781ed75d74e0a71d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1730999777227 2024-11-07T17:16:20,781 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f31135fbd234a0980023daeaa0b7f88, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1730999778362 2024-11-07T17:16:20,786 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/2ace928bd4034989986e4935c50778ab as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2ace928bd4034989986e4935c50778ab 2024-11-07T17:16:20,790 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/A of 9c87afdeea8af3233cd3eafc720d61a6 into 2ace928bd4034989986e4935c50778ab(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:20,790 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:20,790 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/A, priority=10, startTime=1730999780330; duration=0sec 2024-11-07T17:16:20,790 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:20,790 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:A 2024-11-07T17:16:20,792 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#C#compaction#237 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:20,792 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/0a9dd42366144d1fa88b0d0cfbeca3fc is 50, key is test_row_0/C:col10/1730999779478/Put/seqid=0 2024-11-07T17:16:20,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742112_1288 (size=12207) 2024-11-07T17:16:20,842 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/4b05aa30f3114374a59d6eaa782c7da0 2024-11-07T17:16:20,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/74220a5a11bb48369bff4d161ee06ead is 50, key is test_row_0/B:col10/1730999779489/Put/seqid=0 2024-11-07T17:16:20,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742113_1289 (size=12001) 2024-11-07T17:16:20,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999840933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999840933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999840936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999840938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:20,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:20,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999840938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:21,201 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/0a9dd42366144d1fa88b0d0cfbeca3fc as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/0a9dd42366144d1fa88b0d0cfbeca3fc 2024-11-07T17:16:21,206 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/C of 9c87afdeea8af3233cd3eafc720d61a6 into 0a9dd42366144d1fa88b0d0cfbeca3fc(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
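[editor note] While the region server works through the flush and the three per-store compactions, the master keeps polling "Checking to see if procedure is done pid=81". A client can observe the same lifecycle by issuing the flush through Admin, which returns once the master reports the FlushTableProcedure complete, and then watching the table's compaction state drain back to NONE. A minimal sketch under those assumptions (class name and poll interval are mine):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndWaitExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Admin.flush submits a flush procedure and returns when the master
      // reports it complete (the pid polling visible in the log above).
      admin.flush(table);

      // The follow-up minor compactions run asynchronously; poll until the
      // table reports no compaction in progress.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);
      }
      System.out.println("Flush done and compactions drained for " + table);
    }
  }
}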
2024-11-07T17:16:21,206 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:21,206 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/C, priority=10, startTime=1730999780330; duration=0sec 2024-11-07T17:16:21,206 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:21,206 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:C 2024-11-07T17:16:21,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-07T17:16:21,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:21,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999841235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:21,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:21,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999841236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:21,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:21,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999841239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:21,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:21,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999841241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:21,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:21,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999841242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:21,257 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/74220a5a11bb48369bff4d161ee06ead 2024-11-07T17:16:21,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/dfe3d34e71a34a70b9e8d0134ff8a6bb is 50, key is test_row_0/C:col10/1730999779489/Put/seqid=0 2024-11-07T17:16:21,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742114_1290 (size=12001) 2024-11-07T17:16:21,671 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/dfe3d34e71a34a70b9e8d0134ff8a6bb 2024-11-07T17:16:21,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/4b05aa30f3114374a59d6eaa782c7da0 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4b05aa30f3114374a59d6eaa782c7da0 2024-11-07T17:16:21,680 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4b05aa30f3114374a59d6eaa782c7da0, entries=150, sequenceid=125, filesize=11.7 K 2024-11-07T17:16:21,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/74220a5a11bb48369bff4d161ee06ead as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/74220a5a11bb48369bff4d161ee06ead 2024-11-07T17:16:21,684 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/74220a5a11bb48369bff4d161ee06ead, entries=150, sequenceid=125, filesize=11.7 K 2024-11-07T17:16:21,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/dfe3d34e71a34a70b9e8d0134ff8a6bb as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/dfe3d34e71a34a70b9e8d0134ff8a6bb 2024-11-07T17:16:21,689 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/dfe3d34e71a34a70b9e8d0134ff8a6bb, entries=150, sequenceid=125, filesize=11.7 K 2024-11-07T17:16:21,690 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 9c87afdeea8af3233cd3eafc720d61a6 in 1259ms, sequenceid=125, compaction requested=false 2024-11-07T17:16:21,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:21,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
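While the region is over its memstore limit, every Mutate call in the log fails with RegionTooBusyException until the flush above completes. The HBase client normally retries such failures on its own; the sketch below makes the retry explicit only for clarity, and depending on client retry settings the exception may instead surface wrapped in a retries-exhausted error rather than directly. The table name, column family A, and the row/column names (test_row_0, col10) are taken from the keys shown in the flush output; the backoff values are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // Retry a few times with a growing pause if the region reports it is over
            // its memstore limit; a production client would rely on its built-in backoff.
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    if (attempt >= 5) {
                        throw e;
                    }
                    Thread.sleep(200L * attempt);
                }
            }
        }
    }
}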
2024-11-07T17:16:21,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-07T17:16:21,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-07T17:16:21,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-07T17:16:21,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5650 sec 2024-11-07T17:16:21,695 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.5700 sec 2024-11-07T17:16:21,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:21,743 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-07T17:16:21,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:21,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:21,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:21,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:21,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:21,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:21,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/4fd1160f16854e218e441c9540d8db69 is 50, key is test_row_0/A:col10/1730999781742/Put/seqid=0 2024-11-07T17:16:21,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742115_1291 (size=12151) 2024-11-07T17:16:21,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:21,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:21,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999841751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:21,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999841750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:21,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:21,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999841751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:21,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:21,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999841754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:21,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:21,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999841755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:21,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:21,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999841856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:21,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:21,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999841856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:21,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:21,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999841856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:21,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:21,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999841858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:21,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:21,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999841858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:22,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999842058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:22,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999842059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:22,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999842060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:22,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999842061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:22,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999842062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,153 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/4fd1160f16854e218e441c9540d8db69 2024-11-07T17:16:22,161 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/a92e5f57b35d439fb85009c7bc1244ea is 50, key is test_row_0/B:col10/1730999781742/Put/seqid=0 2024-11-07T17:16:22,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742116_1292 (size=12151) 2024-11-07T17:16:22,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-07T17:16:22,229 INFO [Thread-1226 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-07T17:16:22,230 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:22,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-07T17:16:22,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 
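The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" line above is the master receiving an administrative flush request, which it runs as FlushTableProcedure pid=83 with a FlushRegionProcedure child per region (the same pattern as pid=81/82 earlier). A minimal sketch of the kind of client call that issues such a request is shown below; the connection configuration is assumed, and only the standard Admin.flush(TableName) call is used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; in the log this shows up
            // as a FlushTableProcedure with per-region FlushRegionProcedure subprocedures,
            // and the client call returns once the procedure completes.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}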
2024-11-07T17:16:22,232 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:22,232 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:22,233 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:22,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-07T17:16:22,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:22,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999842362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:22,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999842362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:22,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999842363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:22,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999842365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:22,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999842366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,384 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,385 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-07T17:16:22,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:22,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:22,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:22,385 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
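Note: the repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources, which rejects new mutations while the region's memstore is above its blocking threshold and a flush is still in flight; the unusually small 512 K figure presumably reflects reduced memstore settings used by this TestAcidGuarantees run. The following is only a hedged sketch of how a caller that handles the exception itself might back off and retry; the table, row, and column names are taken from the log, the standard HBase client normally retries this condition internally, and depending on client settings the exception may surface wrapped in a retries-exhausted exception instead.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row and column taken from the log ("key is test_row_0/A:col10/..."); value is arbitrary.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          return; // write accepted
        } catch (RegionTooBusyException e) {
          // Illustrative only: the region blocks updates until its in-flight flush frees
          // memstore space, so back off briefly and try again.
          Thread.sleep(100L * attempt);
        }
      }
    }
  }
}
```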
2024-11-07T17:16:22,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:22,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:22,501 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-07T17:16:22,501 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-07T17:16:22,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-07T17:16:22,537 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,538 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-07T17:16:22,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:22,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:22,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:22,538 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:22,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:22,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:22,567 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/a92e5f57b35d439fb85009c7bc1244ea 2024-11-07T17:16:22,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/6b1161e5ac1347d8a5540bfa50e95f05 is 50, key is test_row_0/C:col10/1730999781742/Put/seqid=0 2024-11-07T17:16:22,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742117_1293 (size=12151) 2024-11-07T17:16:22,690 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,690 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-07T17:16:22,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:22,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:22,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:22,691 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:22,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:22,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:22,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-07T17:16:22,843 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-07T17:16:22,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:22,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:22,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:22,843 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:22,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:22,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:22,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:22,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999842866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:22,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999842868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:22,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999842869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:22,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999842870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:22,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999842871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:22,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/6b1161e5ac1347d8a5540bfa50e95f05 2024-11-07T17:16:22,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/4fd1160f16854e218e441c9540d8db69 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4fd1160f16854e218e441c9540d8db69 2024-11-07T17:16:22,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4fd1160f16854e218e441c9540d8db69, entries=150, sequenceid=151, filesize=11.9 K 2024-11-07T17:16:22,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/a92e5f57b35d439fb85009c7bc1244ea as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a92e5f57b35d439fb85009c7bc1244ea 2024-11-07T17:16:22,993 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a92e5f57b35d439fb85009c7bc1244ea, entries=150, sequenceid=151, filesize=11.9 K 2024-11-07T17:16:22,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/6b1161e5ac1347d8a5540bfa50e95f05 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/6b1161e5ac1347d8a5540bfa50e95f05 2024-11-07T17:16:22,996 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 
2024-11-07T17:16:22,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-07T17:16:22,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:22,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:22,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:22,996 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:22,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:22,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
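Note: the pid=84 cycle above (master dispatches FlushRegionCallable, the region server answers "NOT flushing ... as already flushing", the callable fails with "Unable to complete flush", and the master re-dispatches shortly after) is the flush procedure retrying until the region's in-progress flush completes. A flush like this can be requested from a client through the Admin API; the sketch below is a minimal illustration using the table name from the log, not the exact code this test runs.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush the table's regions; server side this becomes a flush
      // procedure whose per-region callable (pid=84 above) is re-dispatched until the
      // region server is no longer busy with a previous flush.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```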
2024-11-07T17:16:22,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/6b1161e5ac1347d8a5540bfa50e95f05, entries=150, sequenceid=151, filesize=11.9 K 2024-11-07T17:16:23,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 9c87afdeea8af3233cd3eafc720d61a6 in 1257ms, sequenceid=151, compaction requested=true 2024-11-07T17:16:23,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:23,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:16:23,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:23,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:23,000 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:23,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:23,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:23,000 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:23,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:23,001 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:23,001 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:23,001 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/A is initiating minor compaction (all files) 2024-11-07T17:16:23,001 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/B is initiating minor compaction (all files) 2024-11-07T17:16:23,001 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/B in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:23,001 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/A in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:23,001 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/52d6aa35cb5f409b8ff9172888da3567, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/74220a5a11bb48369bff4d161ee06ead, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a92e5f57b35d439fb85009c7bc1244ea] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=35.5 K 2024-11-07T17:16:23,001 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2ace928bd4034989986e4935c50778ab, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4b05aa30f3114374a59d6eaa782c7da0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4fd1160f16854e218e441c9540d8db69] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=35.5 K 2024-11-07T17:16:23,002 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ace928bd4034989986e4935c50778ab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1730999778362 2024-11-07T17:16:23,002 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 52d6aa35cb5f409b8ff9172888da3567, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1730999778362 2024-11-07T17:16:23,002 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b05aa30f3114374a59d6eaa782c7da0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1730999779489 2024-11-07T17:16:23,002 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 74220a5a11bb48369bff4d161ee06ead, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1730999779489 2024-11-07T17:16:23,003 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fd1160f16854e218e441c9540d8db69, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1730999780621 2024-11-07T17:16:23,003 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting a92e5f57b35d439fb85009c7bc1244ea, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1730999780621 
2024-11-07T17:16:23,011 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#B#compaction#243 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:23,011 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#A#compaction#244 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:23,011 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/9b0879db2890468ab1cded4b48544b43 is 50, key is test_row_0/B:col10/1730999781742/Put/seqid=0 2024-11-07T17:16:23,011 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/0d2522ac2a6c425b940caa07d863944f is 50, key is test_row_0/A:col10/1730999781742/Put/seqid=0 2024-11-07T17:16:23,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742118_1294 (size=12459) 2024-11-07T17:16:23,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742119_1295 (size=12459) 2024-11-07T17:16:23,027 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/9b0879db2890468ab1cded4b48544b43 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/9b0879db2890468ab1cded4b48544b43 2024-11-07T17:16:23,032 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/0d2522ac2a6c425b940caa07d863944f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/0d2522ac2a6c425b940caa07d863944f 2024-11-07T17:16:23,035 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/B of 9c87afdeea8af3233cd3eafc720d61a6 into 9b0879db2890468ab1cded4b48544b43(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:16:23,035 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:23,035 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/B, priority=13, startTime=1730999783000; duration=0sec 2024-11-07T17:16:23,035 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:23,035 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:B 2024-11-07T17:16:23,035 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:23,036 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:23,036 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/C is initiating minor compaction (all files) 2024-11-07T17:16:23,036 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/C in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:23,036 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/0a9dd42366144d1fa88b0d0cfbeca3fc, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/dfe3d34e71a34a70b9e8d0134ff8a6bb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/6b1161e5ac1347d8a5540bfa50e95f05] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=35.5 K 2024-11-07T17:16:23,037 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a9dd42366144d1fa88b0d0cfbeca3fc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1730999778362 2024-11-07T17:16:23,037 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/A of 9c87afdeea8af3233cd3eafc720d61a6 into 0d2522ac2a6c425b940caa07d863944f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:16:23,037 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:23,037 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/A, priority=13, startTime=1730999783000; duration=0sec 2024-11-07T17:16:23,037 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:23,037 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:A 2024-11-07T17:16:23,037 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting dfe3d34e71a34a70b9e8d0134ff8a6bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1730999779489 2024-11-07T17:16:23,037 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b1161e5ac1347d8a5540bfa50e95f05, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1730999780621 2024-11-07T17:16:23,045 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#C#compaction#245 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:23,045 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/eba3a9abacac48d1aeb51bee8fadd0a5 is 50, key is test_row_0/C:col10/1730999781742/Put/seqid=0 2024-11-07T17:16:23,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742120_1296 (size=12459) 2024-11-07T17:16:23,055 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/eba3a9abacac48d1aeb51bee8fadd0a5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/eba3a9abacac48d1aeb51bee8fadd0a5 2024-11-07T17:16:23,059 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/C of 9c87afdeea8af3233cd3eafc720d61a6 into eba3a9abacac48d1aeb51bee8fadd0a5(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
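As with the A and B families earlier, the C compaction above writes its output under the region's .tmp directory and HRegionFileSystem then "commits" it into the store directory. Below is a minimal sketch of that write-to-.tmp-then-rename pattern using the Hadoop FileSystem API; the helper and paths are illustrative and this is not HBase's actual commit code, which also validates the new file and updates store bookkeeping.

```java
// Minimal "write to .tmp, then rename into place" sketch, mirroring the
// "Committing .../.tmp/C/... as .../C/..." lines above. Illustrative only.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitStoreFileSketch {
  public static Path commit(Configuration conf, Path tmpFile, Path storeDir) throws IOException {
    FileSystem fs = tmpFile.getFileSystem(conf);
    Path dst = new Path(storeDir, tmpFile.getName());
    // On HDFS the rename is atomic, so readers either see the old file set or the new one,
    // never a partially written HFile.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }
}
```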
2024-11-07T17:16:23,059 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:23,059 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/C, priority=13, startTime=1730999783000; duration=0sec 2024-11-07T17:16:23,060 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:23,060 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:C 2024-11-07T17:16:23,149 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:23,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-07T17:16:23,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:23,150 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T17:16:23,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:23,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:23,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:23,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:23,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:23,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:23,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/cfacc44c6c3a48fea867e45ea79ea33e is 50, key is test_row_0/A:col10/1730999781754/Put/seqid=0 2024-11-07T17:16:23,162 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742121_1297 (size=12151) 2024-11-07T17:16:23,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-07T17:16:23,562 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/cfacc44c6c3a48fea867e45ea79ea33e 2024-11-07T17:16:23,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/25ec7eb2ade24f868f24e6e5be0d71d5 is 50, key is test_row_0/B:col10/1730999781754/Put/seqid=0 2024-11-07T17:16:23,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742122_1298 (size=12151) 2024-11-07T17:16:23,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:23,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:23,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:23,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999843887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:23,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:23,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999843888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:23,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:23,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999843889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:23,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:23,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999843890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:23,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:23,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999843890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:23,974 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/25ec7eb2ade24f868f24e6e5be0d71d5 2024-11-07T17:16:23,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/829607ce4c7e49a3ac830b2e7ab97094 is 50, key is test_row_0/C:col10/1730999781754/Put/seqid=0 2024-11-07T17:16:23,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742123_1299 (size=12151) 2024-11-07T17:16:23,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:23,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999843992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:23,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:23,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999843992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:23,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:23,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999843993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:23,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:23,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:23,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999843994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:23,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999843994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999844195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999844195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999844195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999844197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999844198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-07T17:16:24,386 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/829607ce4c7e49a3ac830b2e7ab97094 2024-11-07T17:16:24,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/cfacc44c6c3a48fea867e45ea79ea33e as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/cfacc44c6c3a48fea867e45ea79ea33e 2024-11-07T17:16:24,396 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/cfacc44c6c3a48fea867e45ea79ea33e, entries=150, sequenceid=167, filesize=11.9 K 2024-11-07T17:16:24,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/25ec7eb2ade24f868f24e6e5be0d71d5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/25ec7eb2ade24f868f24e6e5be0d71d5 2024-11-07T17:16:24,401 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/25ec7eb2ade24f868f24e6e5be0d71d5, entries=150, sequenceid=167, filesize=11.9 K 2024-11-07T17:16:24,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/829607ce4c7e49a3ac830b2e7ab97094 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/829607ce4c7e49a3ac830b2e7ab97094 2024-11-07T17:16:24,406 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/829607ce4c7e49a3ac830b2e7ab97094, entries=150, sequenceid=167, filesize=11.9 K 2024-11-07T17:16:24,407 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 9c87afdeea8af3233cd3eafc720d61a6 in 1257ms, sequenceid=167, compaction requested=false 2024-11-07T17:16:24,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:24,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
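The burst of RegionTooBusyException warnings surrounding this flush means writers hit the region's blocking memstore size ("Over memstore limit=512.0 K"): HRegion.checkResources rejects puts once the memstore reaches the flush size multiplied by the block multiplier, and callers are expected to back off until the flush drains the memstore. The sketch below shows the two configuration keys involved and merely reproduces a 512 K blocking limit for illustration; the values are assumptions, not what TestAcidGuarantees actually configures.

```java
// Sketch of the two settings behind "Over memstore limit=512.0 K": writes are rejected
// with RegionTooBusyException once a region's memstore reaches flush.size * block.multiplier.
// The values below only reproduce a 512 K blocking limit for illustration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static Configuration tinyMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);  // flush at 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block writes at 4 x 128 K = 512 K
    return conf;
  }

  public static void main(String[] args) {
    Configuration conf = tinyMemstoreConf();
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes"); // 524288
  }
}
```

The standard HBase client retries these RegionTooBusyException responses internally with backoff, which is why the test's writers keep reappearing in the log with new callIds rather than failing outright.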
2024-11-07T17:16:24,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-07T17:16:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-07T17:16:24,410 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-07T17:16:24,410 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1750 sec 2024-11-07T17:16:24,411 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 2.1800 sec 2024-11-07T17:16:24,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:24,501 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-07T17:16:24,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:24,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:24,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:24,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:24,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:24,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:24,507 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/f7515d7a539a43a29b492e34230f0ff0 is 50, key is test_row_0/A:col10/1730999784500/Put/seqid=0 2024-11-07T17:16:24,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999844505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999844506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999844506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999844507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999844508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742124_1300 (size=12151) 2024-11-07T17:16:24,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999844609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999844610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999844610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999844610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999844611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999844812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999844812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999844813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999844813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:24,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999844814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:24,912 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/f7515d7a539a43a29b492e34230f0ff0 2024-11-07T17:16:24,921 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/a85a7b63cb8848b98c7c471443af9631 is 50, key is test_row_0/B:col10/1730999784500/Put/seqid=0 2024-11-07T17:16:24,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742125_1301 (size=12151) 2024-11-07T17:16:24,925 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/a85a7b63cb8848b98c7c471443af9631 2024-11-07T17:16:24,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/69a5afd5e6f240e2bd61d62581c24fe8 is 50, key is test_row_0/C:col10/1730999784500/Put/seqid=0 2024-11-07T17:16:24,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742126_1302 (size=12151) 2024-11-07T17:16:25,116 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999845115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999845115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999845115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999845117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-07T17:16:25,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999845118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734
2024-11-07T17:16:25,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/69a5afd5e6f240e2bd61d62581c24fe8
2024-11-07T17:16:25,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/f7515d7a539a43a29b492e34230f0ff0 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/f7515d7a539a43a29b492e34230f0ff0
2024-11-07T17:16:25,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/f7515d7a539a43a29b492e34230f0ff0, entries=150, sequenceid=194, filesize=11.9 K
2024-11-07T17:16:25,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/a85a7b63cb8848b98c7c471443af9631 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a85a7b63cb8848b98c7c471443af9631
2024-11-07T17:16:25,367 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a85a7b63cb8848b98c7c471443af9631, entries=150, sequenceid=194, filesize=11.9 K
2024-11-07T17:16:25,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/69a5afd5e6f240e2bd61d62581c24fe8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/69a5afd5e6f240e2bd61d62581c24fe8
2024-11-07T17:16:25,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/69a5afd5e6f240e2bd61d62581c24fe8, entries=150, sequenceid=194, filesize=11.9 K
2024-11-07T17:16:25,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 9c87afdeea8af3233cd3eafc720d61a6 in 871ms, sequenceid=194, compaction requested=true
2024-11-07T17:16:25,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6:
2024-11-07T17:16:25,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:A, priority=-2147483648, current under compaction store size is 1
2024-11-07T17:16:25,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T17:16:25,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:B, priority=-2147483648, current under compaction store size is 2
2024-11-07T17:16:25,372 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-07T17:16:25,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T17:16:25,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:C, priority=-2147483648, current under compaction store size is 3
2024-11-07T17:16:25,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-07T17:16:25,372 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-07T17:16:25,373 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-07T17:16:25,374 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/B is initiating minor compaction (all files)
2024-11-07T17:16:25,374 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/B in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.
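The flush that finishes above is what clears the repeated RegionTooBusyException rejections: a region refuses new mutations once its memstore exceeds the blocking limit, which HBase derives from the configured flush size multiplied by the block multiplier. The exact settings this test run used are not visible in this excerpt, so the following Java sketch only illustrates, under the assumption of a deliberately small flush size, how a 512 KB blocking limit like the one reported in these log lines would be produced; the property keys are the standard HBase configuration keys, the numeric values are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Hypothetical values chosen so that flushSize * blockMultiplier = 512 KB,
        // matching the "Over memstore limit=512.0 K" messages in this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // flush at 128 KB (assumed test value)
        conf.setLong("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x the flush size

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;

        // A region whose memstore grows past this limit rejects puts with
        // RegionTooBusyException until a flush brings it back under the limit.
        System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
    }
}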
2024-11-07T17:16:25,374 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/9b0879db2890468ab1cded4b48544b43, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/25ec7eb2ade24f868f24e6e5be0d71d5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a85a7b63cb8848b98c7c471443af9631] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=35.9 K 2024-11-07T17:16:25,374 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:25,374 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b0879db2890468ab1cded4b48544b43, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1730999780621 2024-11-07T17:16:25,374 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/A is initiating minor compaction (all files) 2024-11-07T17:16:25,374 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/A in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
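The entries above show the server-side compaction path: after the flush each store holds three HFiles, ExploringCompactionPolicy selects all three eligible files, and a minor compaction merges them into one file per store. The same flush and compaction can also be requested explicitly through the client Admin API; the sketch below is a generic illustration (only the table name is taken from the log), not how TestAcidGuarantees itself drives these operations, since in the test they are triggered by the MemStoreFlusher and CompactSplit threads seen above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Ask the region servers to flush the table's memstores to new HFiles,
            // the same operation the MemStoreFlusher performs in the log above.
            admin.flush(table);

            // Request a compaction; the server-side policy decides which store
            // files to merge, as in the ExploringCompactionPolicy entries above.
            admin.compact(table);
        }
    }
}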
2024-11-07T17:16:25,374 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/0d2522ac2a6c425b940caa07d863944f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/cfacc44c6c3a48fea867e45ea79ea33e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/f7515d7a539a43a29b492e34230f0ff0] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=35.9 K 2024-11-07T17:16:25,375 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 25ec7eb2ade24f868f24e6e5be0d71d5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1730999781750 2024-11-07T17:16:25,375 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d2522ac2a6c425b940caa07d863944f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1730999780621 2024-11-07T17:16:25,375 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting a85a7b63cb8848b98c7c471443af9631, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1730999783888 2024-11-07T17:16:25,375 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting cfacc44c6c3a48fea867e45ea79ea33e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1730999781750 2024-11-07T17:16:25,376 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7515d7a539a43a29b492e34230f0ff0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1730999783888 2024-11-07T17:16:25,394 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#A#compaction#253 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:25,394 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#B#compaction#252 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:25,395 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/278c1f0eebb741a08fe9dfc62d814108 is 50, key is test_row_0/A:col10/1730999784500/Put/seqid=0 2024-11-07T17:16:25,395 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/56c975f3dc1444f8b82af66633e23a57 is 50, key is test_row_0/B:col10/1730999784500/Put/seqid=0 2024-11-07T17:16:25,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742127_1303 (size=12561) 2024-11-07T17:16:25,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742128_1304 (size=12561) 2024-11-07T17:16:25,414 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/56c975f3dc1444f8b82af66633e23a57 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/56c975f3dc1444f8b82af66633e23a57 2024-11-07T17:16:25,420 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/B of 9c87afdeea8af3233cd3eafc720d61a6 into 56c975f3dc1444f8b82af66633e23a57(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
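Between the flush and compaction entries, this log is dominated by RegionTooBusyException: the region server rejecting Mutate calls while the memstore sits above its blocking limit. The HBase client normally absorbs these by retrying internally, and they may also surface wrapped in RetriesExhaustedWithDetailsException once client retries run out. The sketch below is a minimal, hypothetical example of how an application could back off and retry on its own when the exception does surface; the row, family, and qualifier are taken from the log (test_row_0, A, col10), while the retry count and sleep values are arbitrary assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnRegionTooBusySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // The region is over its memstore blocking limit; wait for the
                    // flush to drain it, then try again with exponential backoff.
                    if (attempt >= 5) {
                        throw e;
                    }
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}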
2024-11-07T17:16:25,420 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:25,420 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/B, priority=13, startTime=1730999785372; duration=0sec 2024-11-07T17:16:25,420 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:25,420 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:B 2024-11-07T17:16:25,420 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:25,422 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:25,422 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/C is initiating minor compaction (all files) 2024-11-07T17:16:25,422 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/C in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:25,422 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/eba3a9abacac48d1aeb51bee8fadd0a5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/829607ce4c7e49a3ac830b2e7ab97094, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/69a5afd5e6f240e2bd61d62581c24fe8] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=35.9 K 2024-11-07T17:16:25,422 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting eba3a9abacac48d1aeb51bee8fadd0a5, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1730999780621 2024-11-07T17:16:25,423 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 829607ce4c7e49a3ac830b2e7ab97094, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1730999781750 2024-11-07T17:16:25,423 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 69a5afd5e6f240e2bd61d62581c24fe8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1730999783888 2024-11-07T17:16:25,432 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
9c87afdeea8af3233cd3eafc720d61a6#C#compaction#254 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:25,432 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/b8e70d7a73604936bdfc0ef589582ed0 is 50, key is test_row_0/C:col10/1730999784500/Put/seqid=0 2024-11-07T17:16:25,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742129_1305 (size=12561) 2024-11-07T17:16:25,445 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/b8e70d7a73604936bdfc0ef589582ed0 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/b8e70d7a73604936bdfc0ef589582ed0 2024-11-07T17:16:25,452 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/C of 9c87afdeea8af3233cd3eafc720d61a6 into b8e70d7a73604936bdfc0ef589582ed0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:25,452 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:25,452 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/C, priority=13, startTime=1730999785372; duration=0sec 2024-11-07T17:16:25,452 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:25,452 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:C 2024-11-07T17:16:25,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:25,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T17:16:25,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:25,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:25,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:25,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:25,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO 
DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:25,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:25,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/f5df15f99d25450c9518480fd225a7b5 is 50, key is test_row_0/A:col10/1730999785619/Put/seqid=0 2024-11-07T17:16:25,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742130_1306 (size=12151) 2024-11-07T17:16:25,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999845637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999845638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999845639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999845640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999845641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999845742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999845742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999845742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999845743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999845744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,813 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/278c1f0eebb741a08fe9dfc62d814108 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/278c1f0eebb741a08fe9dfc62d814108 2024-11-07T17:16:25,818 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/A of 9c87afdeea8af3233cd3eafc720d61a6 into 278c1f0eebb741a08fe9dfc62d814108(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:25,818 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:25,818 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/A, priority=13, startTime=1730999785372; duration=0sec 2024-11-07T17:16:25,819 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:25,819 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:A 2024-11-07T17:16:25,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999845944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999845945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999845946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999845946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:25,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:25,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999845948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:26,033 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/f5df15f99d25450c9518480fd225a7b5 2024-11-07T17:16:26,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/5f3fe90544c643d385d17c8745db9098 is 50, key is test_row_0/B:col10/1730999785619/Put/seqid=0 2024-11-07T17:16:26,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742131_1307 (size=12151) 2024-11-07T17:16:26,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:26,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999846248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:26,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:26,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999846250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:26,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:26,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999846250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:26,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:26,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999846250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:26,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:26,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999846251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:26,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-07T17:16:26,336 INFO [Thread-1226 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-07T17:16:26,338 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:26,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-07T17:16:26,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-07T17:16:26,339 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:26,340 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:26,340 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:26,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=85 2024-11-07T17:16:26,446 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/5f3fe90544c643d385d17c8745db9098 2024-11-07T17:16:26,456 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/3138abf26d0a4eabba220d50a84be059 is 50, key is test_row_0/C:col10/1730999785619/Put/seqid=0 2024-11-07T17:16:26,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742132_1308 (size=12151) 2024-11-07T17:16:26,491 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:26,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-07T17:16:26,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:26,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:26,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:26,492 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:26,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:26,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:26,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-07T17:16:26,645 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:26,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-07T17:16:26,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:26,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:26,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:26,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:26,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:26,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:26,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:26,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999846754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:26,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:26,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999846754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:26,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:26,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999846755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:26,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:26,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999846756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:26,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:26,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999846758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:26,797 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:26,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-07T17:16:26,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:26,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:26,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:26,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:26,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:26,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:26,860 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/3138abf26d0a4eabba220d50a84be059 2024-11-07T17:16:26,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/f5df15f99d25450c9518480fd225a7b5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/f5df15f99d25450c9518480fd225a7b5 2024-11-07T17:16:26,868 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/f5df15f99d25450c9518480fd225a7b5, entries=150, sequenceid=209, filesize=11.9 K 2024-11-07T17:16:26,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/5f3fe90544c643d385d17c8745db9098 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/5f3fe90544c643d385d17c8745db9098 2024-11-07T17:16:26,874 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/5f3fe90544c643d385d17c8745db9098, entries=150, sequenceid=209, filesize=11.9 K 2024-11-07T17:16:26,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/3138abf26d0a4eabba220d50a84be059 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/3138abf26d0a4eabba220d50a84be059 2024-11-07T17:16:26,879 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/3138abf26d0a4eabba220d50a84be059, entries=150, sequenceid=209, filesize=11.9 K 2024-11-07T17:16:26,880 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 9c87afdeea8af3233cd3eafc720d61a6 in 1258ms, sequenceid=209, compaction requested=false 2024-11-07T17:16:26,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:26,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=85 2024-11-07T17:16:26,950 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:26,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-07T17:16:26,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:26,951 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T17:16:26,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:26,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:26,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:26,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:26,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:26,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:26,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/396a914d12a640ca98b164d47bff9f1d is 50, key is test_row_0/A:col10/1730999785640/Put/seqid=0 2024-11-07T17:16:26,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742133_1309 (size=12151) 2024-11-07T17:16:26,960 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/396a914d12a640ca98b164d47bff9f1d 2024-11-07T17:16:26,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/83ad8a529f6940138e45b26064f16a93 is 50, key is test_row_0/B:col10/1730999785640/Put/seqid=0 2024-11-07T17:16:26,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742134_1310 (size=12151) 2024-11-07T17:16:27,372 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/83ad8a529f6940138e45b26064f16a93 2024-11-07T17:16:27,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/a07ac562e4f54574bbf3622d759c1e1b is 50, key is test_row_0/C:col10/1730999785640/Put/seqid=0 2024-11-07T17:16:27,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742135_1311 (size=12151) 2024-11-07T17:16:27,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-07T17:16:27,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:27,761 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:27,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:27,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999847766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:27,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:27,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999847767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:27,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:27,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999847767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:27,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:27,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999847768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:27,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:27,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999847768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:27,784 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/a07ac562e4f54574bbf3622d759c1e1b 2024-11-07T17:16:27,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/396a914d12a640ca98b164d47bff9f1d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/396a914d12a640ca98b164d47bff9f1d 2024-11-07T17:16:27,792 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/396a914d12a640ca98b164d47bff9f1d, entries=150, sequenceid=233, filesize=11.9 K 2024-11-07T17:16:27,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/83ad8a529f6940138e45b26064f16a93 as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/83ad8a529f6940138e45b26064f16a93 2024-11-07T17:16:27,797 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/83ad8a529f6940138e45b26064f16a93, entries=150, sequenceid=233, filesize=11.9 K 2024-11-07T17:16:27,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/a07ac562e4f54574bbf3622d759c1e1b as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/a07ac562e4f54574bbf3622d759c1e1b 2024-11-07T17:16:27,803 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/a07ac562e4f54574bbf3622d759c1e1b, entries=150, sequenceid=233, filesize=11.9 K 2024-11-07T17:16:27,804 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 9c87afdeea8af3233cd3eafc720d61a6 in 853ms, sequenceid=233, compaction requested=true 2024-11-07T17:16:27,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:27,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:27,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-07T17:16:27,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-07T17:16:27,807 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-07T17:16:27,807 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4650 sec 2024-11-07T17:16:27,809 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.4690 sec 2024-11-07T17:16:27,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:27,873 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T17:16:27,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:27,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:27,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:27,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:27,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:27,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:27,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/2491ac629b4142bf92c81a87932bb76f is 50, key is test_row_0/A:col10/1730999787871/Put/seqid=0 2024-11-07T17:16:27,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742136_1312 (size=14541) 2024-11-07T17:16:27,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999847918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:27,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:27,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999847918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:27,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:27,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:27,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999847918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:27,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999847919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:27,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:27,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999847919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:28,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999848024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:28,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999848024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:28,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999848024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:28,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999848024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:28,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999848024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:28,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999848225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:28,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999848226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:28,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999848226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:28,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999848228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:28,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999848229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/2491ac629b4142bf92c81a87932bb76f 2024-11-07T17:16:28,297 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/7c1cfb4238f64bc9a8da63c22a46160a is 50, key is test_row_0/B:col10/1730999787871/Put/seqid=0 2024-11-07T17:16:28,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742137_1313 (size=12151) 2024-11-07T17:16:28,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-07T17:16:28,443 INFO [Thread-1226 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-07T17:16:28,445 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:28,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-07T17:16:28,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 
2024-11-07T17:16:28,447 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:28,447 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:28,447 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:28,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:28,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999848529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:28,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999848530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:28,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999848531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:28,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999848531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:28,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999848537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-07T17:16:28,599 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-07T17:16:28,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:28,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:28,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:28,600 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:28,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:28,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:28,701 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/7c1cfb4238f64bc9a8da63c22a46160a 2024-11-07T17:16:28,709 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/8e09f5c129f349ada4f59872142958a8 is 50, key is test_row_0/C:col10/1730999787871/Put/seqid=0 2024-11-07T17:16:28,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742138_1314 (size=12151) 2024-11-07T17:16:28,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-07T17:16:28,752 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-07T17:16:28,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:28,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:28,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:28,753 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:28,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:28,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:28,907 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:28,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-07T17:16:28,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:28,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:28,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:28,908 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:28,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:28,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:29,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:29,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999849033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:29,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:29,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999849035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:29,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:29,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999849036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:29,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:29,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999849036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:29,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:29,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999849039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:29,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-07T17:16:29,061 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:29,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-07T17:16:29,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:29,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:29,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:29,061 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:29,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:29,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:29,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/8e09f5c129f349ada4f59872142958a8 2024-11-07T17:16:29,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/2491ac629b4142bf92c81a87932bb76f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2491ac629b4142bf92c81a87932bb76f 2024-11-07T17:16:29,121 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2491ac629b4142bf92c81a87932bb76f, entries=200, sequenceid=249, filesize=14.2 K 2024-11-07T17:16:29,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/7c1cfb4238f64bc9a8da63c22a46160a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/7c1cfb4238f64bc9a8da63c22a46160a 2024-11-07T17:16:29,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/7c1cfb4238f64bc9a8da63c22a46160a, entries=150, sequenceid=249, filesize=11.9 K 2024-11-07T17:16:29,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/8e09f5c129f349ada4f59872142958a8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/8e09f5c129f349ada4f59872142958a8 2024-11-07T17:16:29,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/8e09f5c129f349ada4f59872142958a8, entries=150, sequenceid=249, filesize=11.9 K 2024-11-07T17:16:29,130 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 9c87afdeea8af3233cd3eafc720d61a6 in 1257ms, sequenceid=249, compaction requested=true 2024-11-07T17:16:29,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:29,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
9c87afdeea8af3233cd3eafc720d61a6:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:16:29,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:29,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:29,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:29,130 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:16:29,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:29,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:29,130 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:16:29,132 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51404 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:16:29,132 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:16:29,132 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/A is initiating minor compaction (all files) 2024-11-07T17:16:29,132 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/B is initiating minor compaction (all files) 2024-11-07T17:16:29,132 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/A in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:29,132 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/B in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:29,132 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/278c1f0eebb741a08fe9dfc62d814108, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/f5df15f99d25450c9518480fd225a7b5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/396a914d12a640ca98b164d47bff9f1d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2491ac629b4142bf92c81a87932bb76f] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=50.2 K 2024-11-07T17:16:29,132 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/56c975f3dc1444f8b82af66633e23a57, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/5f3fe90544c643d385d17c8745db9098, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/83ad8a529f6940138e45b26064f16a93, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/7c1cfb4238f64bc9a8da63c22a46160a] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=47.9 K 2024-11-07T17:16:29,133 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 278c1f0eebb741a08fe9dfc62d814108, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1730999783888 2024-11-07T17:16:29,133 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 56c975f3dc1444f8b82af66633e23a57, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1730999783888 2024-11-07T17:16:29,133 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5df15f99d25450c9518480fd225a7b5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1730999785619 2024-11-07T17:16:29,133 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f3fe90544c643d385d17c8745db9098, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1730999785619 2024-11-07T17:16:29,134 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 396a914d12a640ca98b164d47bff9f1d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1730999785637 2024-11-07T17:16:29,134 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 
83ad8a529f6940138e45b26064f16a93, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1730999785637 2024-11-07T17:16:29,134 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2491ac629b4142bf92c81a87932bb76f, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1730999787766 2024-11-07T17:16:29,134 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c1cfb4238f64bc9a8da63c22a46160a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1730999787766 2024-11-07T17:16:29,144 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#B#compaction#264 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:29,144 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/f222f2ecbb6e4bd6aaceddfe1aafa6f4 is 50, key is test_row_0/B:col10/1730999787871/Put/seqid=0 2024-11-07T17:16:29,146 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#A#compaction#265 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:29,146 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/109f8dd9e2f24724a81cb3d8140f944d is 50, key is test_row_0/A:col10/1730999787871/Put/seqid=0 2024-11-07T17:16:29,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742139_1315 (size=12697) 2024-11-07T17:16:29,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742140_1316 (size=12697) 2024-11-07T17:16:29,213 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:29,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-07T17:16:29,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:29,214 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-07T17:16:29,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:29,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:29,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:29,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:29,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:29,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:29,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/8410946827dc494d8f29ded45ddb029d is 50, key is test_row_0/A:col10/1730999787917/Put/seqid=0 2024-11-07T17:16:29,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742141_1317 (size=12301) 2024-11-07T17:16:29,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-07T17:16:29,563 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/109f8dd9e2f24724a81cb3d8140f944d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/109f8dd9e2f24724a81cb3d8140f944d 2024-11-07T17:16:29,566 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/f222f2ecbb6e4bd6aaceddfe1aafa6f4 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/f222f2ecbb6e4bd6aaceddfe1aafa6f4 2024-11-07T17:16:29,568 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/A of 9c87afdeea8af3233cd3eafc720d61a6 into 
109f8dd9e2f24724a81cb3d8140f944d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:29,568 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:29,568 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/A, priority=12, startTime=1730999789130; duration=0sec 2024-11-07T17:16:29,569 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:29,569 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:A 2024-11-07T17:16:29,569 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:16:29,570 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/B of 9c87afdeea8af3233cd3eafc720d61a6 into f222f2ecbb6e4bd6aaceddfe1aafa6f4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:29,570 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:29,570 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/B, priority=12, startTime=1730999789130; duration=0sec 2024-11-07T17:16:29,570 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:16:29,570 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:29,570 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:B 2024-11-07T17:16:29,570 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/C is initiating minor compaction (all files) 2024-11-07T17:16:29,570 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/C in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:29,571 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/b8e70d7a73604936bdfc0ef589582ed0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/3138abf26d0a4eabba220d50a84be059, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/a07ac562e4f54574bbf3622d759c1e1b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/8e09f5c129f349ada4f59872142958a8] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=47.9 K 2024-11-07T17:16:29,571 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8e70d7a73604936bdfc0ef589582ed0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1730999783888 2024-11-07T17:16:29,571 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3138abf26d0a4eabba220d50a84be059, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1730999785619 2024-11-07T17:16:29,572 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting a07ac562e4f54574bbf3622d759c1e1b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1730999785637 2024-11-07T17:16:29,572 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e09f5c129f349ada4f59872142958a8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1730999787766 2024-11-07T17:16:29,580 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#C#compaction#267 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:29,581 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/192411e99fe0476b977ca1175c97e4e4 is 50, key is test_row_0/C:col10/1730999787871/Put/seqid=0 2024-11-07T17:16:29,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742142_1318 (size=12697) 2024-11-07T17:16:29,624 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/8410946827dc494d8f29ded45ddb029d 2024-11-07T17:16:29,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/569fda3d046c47c9b6fbdc73ea210c1c is 50, key is test_row_0/B:col10/1730999787917/Put/seqid=0 2024-11-07T17:16:29,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742143_1319 (size=12301) 2024-11-07T17:16:29,990 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/192411e99fe0476b977ca1175c97e4e4 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/192411e99fe0476b977ca1175c97e4e4 2024-11-07T17:16:29,995 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/C of 9c87afdeea8af3233cd3eafc720d61a6 into 192411e99fe0476b977ca1175c97e4e4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
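The cells being flushed and compacted in the entries above (key test_row_0, families A, B and C, qualifier col10, table TestAcidGuarantees) are the kind produced by an ordinary client Put that writes one cell into each of the three stores. The following is only a minimal sketch using the standard HBase Java client; the connection setup and the value payload are assumptions for illustration, not details taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TestRowWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          byte[] value = Bytes.toBytes("value");             // assumed payload
          // One cell per column family, matching the A/B/C stores flushed and compacted in the log.
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
          table.put(put);
        }
      }
    }

Because each addColumn call lands in a separate store, the A, B and C flush and compaction sizes in the surrounding entries track each other closely.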
2024-11-07T17:16:29,996 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:29,996 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/C, priority=12, startTime=1730999789130; duration=0sec 2024-11-07T17:16:29,996 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:29,996 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:C 2024-11-07T17:16:30,035 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/569fda3d046c47c9b6fbdc73ea210c1c 2024-11-07T17:16:30,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:30,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:30,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/2087aabb1d8e4990b70173386723fb5e is 50, key is test_row_0/C:col10/1730999787917/Put/seqid=0 2024-11-07T17:16:30,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742144_1320 (size=12301) 2024-11-07T17:16:30,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999850049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999850050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999850050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999850050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999850051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999850154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999850154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999850154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999850154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999850154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999850356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999850356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999850357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999850357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999850358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,449 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/2087aabb1d8e4990b70173386723fb5e 2024-11-07T17:16:30,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/8410946827dc494d8f29ded45ddb029d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/8410946827dc494d8f29ded45ddb029d 2024-11-07T17:16:30,459 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/8410946827dc494d8f29ded45ddb029d, entries=150, sequenceid=270, filesize=12.0 K 2024-11-07T17:16:30,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/569fda3d046c47c9b6fbdc73ea210c1c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/569fda3d046c47c9b6fbdc73ea210c1c 2024-11-07T17:16:30,463 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/569fda3d046c47c9b6fbdc73ea210c1c, entries=150, sequenceid=270, filesize=12.0 K 2024-11-07T17:16:30,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/2087aabb1d8e4990b70173386723fb5e as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/2087aabb1d8e4990b70173386723fb5e 2024-11-07T17:16:30,468 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/2087aabb1d8e4990b70173386723fb5e, entries=150, sequenceid=270, filesize=12.0 K 2024-11-07T17:16:30,469 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 9c87afdeea8af3233cd3eafc720d61a6 in 1255ms, sequenceid=270, compaction requested=false 2024-11-07T17:16:30,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:30,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
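The repeated RegionTooBusyException WARN/DEBUG pairs around this flush record the region rejecting client Mutate calls while its memstore is over the 512.0 K blocking limit. The standard client will normally retry such calls on its own; the loop below only makes that handling explicit. It is an illustrative sketch, not code from the test, and the retry budget and backoff values are assumptions.

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class BusyRegionRetry {
      static void putWithRetry(Table table, Put put) throws Exception {
        int attempts = 5;        // assumed retry budget
        long backoffMs = 200;    // assumed initial backoff
        for (int i = 0; i < attempts; i++) {
          try {
            table.put(put);
            return;              // write accepted
          } catch (RegionTooBusyException e) {
            // Region is blocking updates until the memstore drains; wait and try again.
            Thread.sleep(backoffMs);
            backoffMs *= 2;
          }
        }
        throw new java.io.IOException("put still rejected after retries");
      }
    }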
2024-11-07T17:16:30,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-07T17:16:30,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-07T17:16:30,473 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-07T17:16:30,473 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0240 sec 2024-11-07T17:16:30,475 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 2.0280 sec 2024-11-07T17:16:30,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-07T17:16:30,551 INFO [Thread-1226 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-07T17:16:30,552 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:30,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-11-07T17:16:30,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-07T17:16:30,554 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:30,554 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:30,554 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:30,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-07T17:16:30,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:30,661 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-07T17:16:30,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:30,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:30,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, 
store=B 2024-11-07T17:16:30,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:30,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:30,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:30,666 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/92eb3eca088e419886b0966601b157e4 is 50, key is test_row_0/A:col10/1730999790659/Put/seqid=0 2024-11-07T17:16:30,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742145_1321 (size=12301) 2024-11-07T17:16:30,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999850674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999850674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999850676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999850677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999850677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,706 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-07T17:16:30,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:30,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:30,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:30,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:30,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:30,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:30,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999850778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999850778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999850779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999850781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999850782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-07T17:16:30,859 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-07T17:16:30,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:30,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:30,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:30,860 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:30,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:30,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:30,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999850981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999850982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999850982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999850985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:30,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:30,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999850985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,012 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-07T17:16:31,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:31,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:31,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:31,013 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:31,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:31,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:31,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/92eb3eca088e419886b0966601b157e4 2024-11-07T17:16:31,079 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/842139c293e0468ea344ac7e24536a48 is 50, key is test_row_0/B:col10/1730999790659/Put/seqid=0 2024-11-07T17:16:31,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742146_1322 (size=12301) 2024-11-07T17:16:31,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-07T17:16:31,164 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-07T17:16:31,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:31,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:31,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:31,165 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:31,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:31,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:31,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:31,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999851285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:31,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999851285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:31,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999851285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:31,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999851287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:31,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999851289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,318 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-07T17:16:31,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:31,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:31,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:31,318 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:31,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:31,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:31,470 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-07T17:16:31,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:31,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:31,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:31,471 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:31,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:31,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:31,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/842139c293e0468ea344ac7e24536a48 2024-11-07T17:16:31,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/0c7b072577b547e8b24626f73ad333fd is 50, key is test_row_0/C:col10/1730999790659/Put/seqid=0 2024-11-07T17:16:31,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742147_1323 (size=12301) 2024-11-07T17:16:31,499 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/0c7b072577b547e8b24626f73ad333fd 2024-11-07T17:16:31,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/92eb3eca088e419886b0966601b157e4 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/92eb3eca088e419886b0966601b157e4 2024-11-07T17:16:31,506 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/92eb3eca088e419886b0966601b157e4, entries=150, sequenceid=290, filesize=12.0 K 2024-11-07T17:16:31,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/842139c293e0468ea344ac7e24536a48 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/842139c293e0468ea344ac7e24536a48 2024-11-07T17:16:31,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/842139c293e0468ea344ac7e24536a48, entries=150, sequenceid=290, filesize=12.0 K 2024-11-07T17:16:31,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/0c7b072577b547e8b24626f73ad333fd as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/0c7b072577b547e8b24626f73ad333fd 2024-11-07T17:16:31,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/0c7b072577b547e8b24626f73ad333fd, entries=150, sequenceid=290, filesize=12.0 K 2024-11-07T17:16:31,514 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 9c87afdeea8af3233cd3eafc720d61a6 in 853ms, sequenceid=290, compaction requested=true 2024-11-07T17:16:31,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:31,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:16:31,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:31,515 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:31,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:31,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:31,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:31,515 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:31,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:31,516 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:31,516 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:31,516 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/B is initiating minor compaction (all files) 2024-11-07T17:16:31,516 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/A is initiating minor compaction (all files) 2024-11-07T17:16:31,516 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/A in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:31,516 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/B in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:31,516 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/109f8dd9e2f24724a81cb3d8140f944d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/8410946827dc494d8f29ded45ddb029d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/92eb3eca088e419886b0966601b157e4] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=36.4 K 2024-11-07T17:16:31,516 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/f222f2ecbb6e4bd6aaceddfe1aafa6f4, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/569fda3d046c47c9b6fbdc73ea210c1c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/842139c293e0468ea344ac7e24536a48] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=36.4 K 2024-11-07T17:16:31,517 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting f222f2ecbb6e4bd6aaceddfe1aafa6f4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1730999787766 2024-11-07T17:16:31,517 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 109f8dd9e2f24724a81cb3d8140f944d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1730999787766 2024-11-07T17:16:31,517 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8410946827dc494d8f29ded45ddb029d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1730999787890 2024-11-07T17:16:31,517 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 569fda3d046c47c9b6fbdc73ea210c1c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1730999787890 2024-11-07T17:16:31,518 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92eb3eca088e419886b0966601b157e4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1730999790045 2024-11-07T17:16:31,518 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 842139c293e0468ea344ac7e24536a48, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1730999790045 
2024-11-07T17:16:31,525 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#B#compaction#274 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:31,525 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#A#compaction#273 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:31,526 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/c10dc60e8f5e4221a36464d827a6f466 is 50, key is test_row_0/B:col10/1730999790659/Put/seqid=0 2024-11-07T17:16:31,526 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/39f7dac4f64e477fbe4c1566835859b9 is 50, key is test_row_0/A:col10/1730999790659/Put/seqid=0 2024-11-07T17:16:31,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742149_1325 (size=12949) 2024-11-07T17:16:31,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742148_1324 (size=12949) 2024-11-07T17:16:31,624 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-07T17:16:31,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:31,624 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-07T17:16:31,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:31,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:31,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:31,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:31,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:31,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:31,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/d5005c693c4340328964ef37cbb52864 is 50, key is test_row_0/A:col10/1730999790676/Put/seqid=0 2024-11-07T17:16:31,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742150_1326 (size=12301) 2024-11-07T17:16:31,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-07T17:16:31,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:31,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:31,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:31,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999851799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:31,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999851800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:31,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999851801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:31,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999851801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:31,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999851802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:31,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999851903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:31,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999851904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:31,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999851904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:31,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999851905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:31,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999851906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:31,953 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/c10dc60e8f5e4221a36464d827a6f466 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/c10dc60e8f5e4221a36464d827a6f466 2024-11-07T17:16:31,953 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/39f7dac4f64e477fbe4c1566835859b9 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/39f7dac4f64e477fbe4c1566835859b9 2024-11-07T17:16:31,958 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/B of 9c87afdeea8af3233cd3eafc720d61a6 into c10dc60e8f5e4221a36464d827a6f466(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:31,958 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/A of 9c87afdeea8af3233cd3eafc720d61a6 into 39f7dac4f64e477fbe4c1566835859b9(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:16:31,958 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:31,958 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:31,959 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/B, priority=13, startTime=1730999791515; duration=0sec 2024-11-07T17:16:31,959 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/A, priority=13, startTime=1730999791514; duration=0sec 2024-11-07T17:16:31,959 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:31,959 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:B 2024-11-07T17:16:31,959 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:31,959 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:A 2024-11-07T17:16:31,959 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:31,960 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:31,960 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/C is initiating minor compaction (all files) 2024-11-07T17:16:31,960 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/C in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:31,960 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/192411e99fe0476b977ca1175c97e4e4, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/2087aabb1d8e4990b70173386723fb5e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/0c7b072577b547e8b24626f73ad333fd] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=36.4 K 2024-11-07T17:16:31,960 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 192411e99fe0476b977ca1175c97e4e4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1730999787766 2024-11-07T17:16:31,961 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 2087aabb1d8e4990b70173386723fb5e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1730999787890 2024-11-07T17:16:31,961 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c7b072577b547e8b24626f73ad333fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1730999790045 2024-11-07T17:16:31,968 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#C#compaction#276 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:31,969 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/f0802187b501468a82741c9611e6aecb is 50, key is test_row_0/C:col10/1730999790659/Put/seqid=0 2024-11-07T17:16:31,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742151_1327 (size=12949) 2024-11-07T17:16:32,034 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/d5005c693c4340328964ef37cbb52864 2024-11-07T17:16:32,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/dc199197f5d64517be97d4b010b839cc is 50, key is test_row_0/B:col10/1730999790676/Put/seqid=0 2024-11-07T17:16:32,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742152_1328 (size=12301) 2024-11-07T17:16:32,046 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/dc199197f5d64517be97d4b010b839cc 2024-11-07T17:16:32,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/de674a09c8634fc4baba600dc731aeb7 is 50, key is test_row_0/C:col10/1730999790676/Put/seqid=0 2024-11-07T17:16:32,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742153_1329 (size=12301) 2024-11-07T17:16:32,058 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/de674a09c8634fc4baba600dc731aeb7 2024-11-07T17:16:32,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/d5005c693c4340328964ef37cbb52864 as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/d5005c693c4340328964ef37cbb52864 2024-11-07T17:16:32,075 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/d5005c693c4340328964ef37cbb52864, entries=150, sequenceid=309, filesize=12.0 K 2024-11-07T17:16:32,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/dc199197f5d64517be97d4b010b839cc as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/dc199197f5d64517be97d4b010b839cc 2024-11-07T17:16:32,081 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/dc199197f5d64517be97d4b010b839cc, entries=150, sequenceid=309, filesize=12.0 K 2024-11-07T17:16:32,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/de674a09c8634fc4baba600dc731aeb7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/de674a09c8634fc4baba600dc731aeb7 2024-11-07T17:16:32,087 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/de674a09c8634fc4baba600dc731aeb7, entries=150, sequenceid=309, filesize=12.0 K 2024-11-07T17:16:32,088 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 9c87afdeea8af3233cd3eafc720d61a6 in 464ms, sequenceid=309, compaction requested=false 2024-11-07T17:16:32,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:32,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:32,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-11-07T17:16:32,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-11-07T17:16:32,091 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-07T17:16:32,091 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5350 sec 2024-11-07T17:16:32,093 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 1.5400 sec 2024-11-07T17:16:32,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-07T17:16:32,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:32,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:32,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:32,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:32,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:32,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:32,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:32,115 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/a1de2a6f4f1b41a2bc08cafe543816e2 is 50, key is test_row_0/A:col10/1730999791799/Put/seqid=0 2024-11-07T17:16:32,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742154_1330 (size=14741) 2024-11-07T17:16:32,126 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/a1de2a6f4f1b41a2bc08cafe543816e2 2024-11-07T17:16:32,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999852123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999852123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999852124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999852126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999852127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/9f8459915b1a4798b9a2d361b6a97155 is 50, key is test_row_0/B:col10/1730999791799/Put/seqid=0 2024-11-07T17:16:32,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742155_1331 (size=12301) 2024-11-07T17:16:32,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/9f8459915b1a4798b9a2d361b6a97155 2024-11-07T17:16:32,158 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/d768c91ecbba4ab7a547bb0bbf0bf667 is 50, key is test_row_0/C:col10/1730999791799/Put/seqid=0 2024-11-07T17:16:32,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742156_1332 (size=12301) 2024-11-07T17:16:32,167 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/d768c91ecbba4ab7a547bb0bbf0bf667 2024-11-07T17:16:32,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/a1de2a6f4f1b41a2bc08cafe543816e2 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/a1de2a6f4f1b41a2bc08cafe543816e2 2024-11-07T17:16:32,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/a1de2a6f4f1b41a2bc08cafe543816e2, entries=200, sequenceid=329, filesize=14.4 K 
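The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking size (in a normal configuration roughly hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512.0 K limit here reflects the test's deliberately small flush size). The HBase client normally retries this exception on its own; the sketch below is only an illustration of an explicit retry loop, assuming the exception surfaces directly to the caller, and the class name and backoff constants are hypothetical.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Hypothetical helper: retry a Put a few times with linear backoff when the
// region reports it is over its memstore limit, giving flushes time to drain.
public final class BusyRegionRetry {
  private static final int MAX_ATTEMPTS = 5;
  private static final long BACKOFF_MS = 200;

  public static void putWithRetry(Table table, Put put) throws Exception {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= MAX_ATTEMPTS) {
          throw e;  // still over the memstore limit; give up
        }
        Thread.sleep(BACKOFF_MS * attempt);  // back off and let MemStoreFlusher catch up
      }
    }
  }
}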
2024-11-07T17:16:32,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/9f8459915b1a4798b9a2d361b6a97155 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/9f8459915b1a4798b9a2d361b6a97155 2024-11-07T17:16:32,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/9f8459915b1a4798b9a2d361b6a97155, entries=150, sequenceid=329, filesize=12.0 K 2024-11-07T17:16:32,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/d768c91ecbba4ab7a547bb0bbf0bf667 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/d768c91ecbba4ab7a547bb0bbf0bf667 2024-11-07T17:16:32,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/d768c91ecbba4ab7a547bb0bbf0bf667, entries=150, sequenceid=329, filesize=12.0 K 2024-11-07T17:16:32,187 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 9c87afdeea8af3233cd3eafc720d61a6 in 78ms, sequenceid=329, compaction requested=true 2024-11-07T17:16:32,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:32,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:A, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:32,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:32,188 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:32,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:B, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:32,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:32,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:32,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-07T17:16:32,188 DEBUG 
[RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39991 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:32,189 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/A is initiating minor compaction (all files) 2024-11-07T17:16:32,189 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/A in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:32,189 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/39f7dac4f64e477fbe4c1566835859b9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/d5005c693c4340328964ef37cbb52864, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/a1de2a6f4f1b41a2bc08cafe543816e2] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=39.1 K 2024-11-07T17:16:32,189 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39f7dac4f64e477fbe4c1566835859b9, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1730999790045 2024-11-07T17:16:32,189 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5005c693c4340328964ef37cbb52864, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1730999790673 2024-11-07T17:16:32,190 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1de2a6f4f1b41a2bc08cafe543816e2, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1730999791796 2024-11-07T17:16:32,198 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#A#compaction#282 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:32,199 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/e925f984810049da86e402b97fb41cec is 50, key is test_row_0/A:col10/1730999791799/Put/seqid=0 2024-11-07T17:16:32,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742157_1333 (size=13051) 2024-11-07T17:16:32,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:32,231 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-07T17:16:32,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:32,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:32,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:32,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:32,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:32,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:32,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999852246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999852247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999852248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999852248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999852249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/883ec416cd41497e871427e68aa9a473 is 50, key is test_row_0/A:col10/1730999792116/Put/seqid=0 2024-11-07T17:16:32,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742158_1334 (size=14741) 2024-11-07T17:16:32,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999852353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999852354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999852357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999852357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999852358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,386 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/f0802187b501468a82741c9611e6aecb as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/f0802187b501468a82741c9611e6aecb 2024-11-07T17:16:32,392 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/C of 9c87afdeea8af3233cd3eafc720d61a6 into f0802187b501468a82741c9611e6aecb(size=12.6 K), total size for store is 36.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
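Several selections above are logged by ExploringCompactionPolicy as having files "in ratio" (for example the 3-file, 39991-byte selection for store A). The rule behind that phrase is, roughly, that no single candidate file may be larger than the compaction ratio times the combined size of the other files in the selection. The snippet below is a simplified illustration of that rule, not the actual HBase implementation; the 1.2 default for hbase.hstore.compaction.ratio is stated here as an assumption.

import java.util.List;

// Simplified stand-in for the "files in ratio" test applied to candidate
// compaction selections; the real policy also weighs file count, total size
// and other constraints before picking a selection.
final class CompactionRatioCheck {
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true;
    }
    long total = 0;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;  // one file dominates the selection; not "in ratio"
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the A-store selection above: files of about 12.6 K, 12.0 K and 14.4 K.
    System.out.println(filesInRatio(List.of(12_900L, 12_300L, 14_741L), 1.2));
  }
}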
2024-11-07T17:16:32,392 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:32,392 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/C, priority=13, startTime=1730999791515; duration=0sec 2024-11-07T17:16:32,393 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-07T17:16:32,393 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:C 2024-11-07T17:16:32,393 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:C 2024-11-07T17:16:32,393 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:32,395 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:32,395 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/B is initiating minor compaction (all files) 2024-11-07T17:16:32,395 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/B in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
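The per-file metadata in the Compactor lines ("keycount=150, bloomtype=ROW, ...") and the "(bloomFilter=true)" notes on the flush lines reflect the bloom filter type configured on each column family. As a reference, a sketch of setting a ROW bloom filter when creating a table through the 2.x client builder API follows; the table and family names simply echo the test table, and the helper class is illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative helper: creates a table with three families, each using a
// row-level bloom filter, which is what "bloomtype=ROW" in the log denotes.
final class BloomFilterExample {
  static void createTable(Admin admin) throws Exception {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    for (String family : new String[] {"A", "B", "C"}) {
      table.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setBloomFilterType(BloomType.ROW)
              .build());
    }
    admin.createTable(table.build());
  }
}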
2024-11-07T17:16:32,395 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/c10dc60e8f5e4221a36464d827a6f466, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/dc199197f5d64517be97d4b010b839cc, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/9f8459915b1a4798b9a2d361b6a97155] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=36.7 K 2024-11-07T17:16:32,396 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting c10dc60e8f5e4221a36464d827a6f466, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1730999790045 2024-11-07T17:16:32,396 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting dc199197f5d64517be97d4b010b839cc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1730999790673 2024-11-07T17:16:32,397 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f8459915b1a4798b9a2d361b6a97155, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1730999791799 2024-11-07T17:16:32,405 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#B#compaction#284 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:32,406 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/e92f9c8b2e7a4f62944df02eeef9b9b4 is 50, key is test_row_0/B:col10/1730999791799/Put/seqid=0 2024-11-07T17:16:32,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742159_1335 (size=13051) 2024-11-07T17:16:32,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999852555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999852559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999852559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999852560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999852560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,624 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/e925f984810049da86e402b97fb41cec as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/e925f984810049da86e402b97fb41cec 2024-11-07T17:16:32,630 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/A of 9c87afdeea8af3233cd3eafc720d61a6 into e925f984810049da86e402b97fb41cec(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
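The short-compactions thread above has just rewritten the three A-store files into a single 12.7 K file. Compactions of this kind are triggered by the region server itself (here, after a flush), but they can also be requested and observed from a client; the sketch below uses the real Admin.compact and Admin.getCompactionState calls, while the polling loop and class name are only illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;

// Illustrative helper: queue a minor compaction for every region of the table
// and wait until the region server threads (e.g. "shortCompactions-0" above)
// report no compaction in progress.
final class CompactionExample {
  static void compactAndWait(Admin admin, TableName table) throws Exception {
    admin.compact(table);
    while (admin.getCompactionState(table) != CompactionState.NONE) {
      Thread.sleep(1000);  // poll; purely for illustration
    }
  }
}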
2024-11-07T17:16:32,630 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:32,630 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/A, priority=13, startTime=1730999792187; duration=0sec 2024-11-07T17:16:32,630 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:32,630 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:A 2024-11-07T17:16:32,630 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:32,632 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:32,632 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 9c87afdeea8af3233cd3eafc720d61a6/C is initiating minor compaction (all files) 2024-11-07T17:16:32,632 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9c87afdeea8af3233cd3eafc720d61a6/C in TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:32,632 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/f0802187b501468a82741c9611e6aecb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/de674a09c8634fc4baba600dc731aeb7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/d768c91ecbba4ab7a547bb0bbf0bf667] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp, totalSize=36.7 K 2024-11-07T17:16:32,632 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0802187b501468a82741c9611e6aecb, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1730999790045 2024-11-07T17:16:32,633 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting de674a09c8634fc4baba600dc731aeb7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1730999790673 2024-11-07T17:16:32,634 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting d768c91ecbba4ab7a547bb0bbf0bf667, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1730999791799 2024-11-07T17:16:32,644 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 9c87afdeea8af3233cd3eafc720d61a6#C#compaction#285 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:32,645 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/b5ae047995a74abba7950461adb8daa5 is 50, key is test_row_0/C:col10/1730999791799/Put/seqid=0 2024-11-07T17:16:32,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742160_1336 (size=13051) 2024-11-07T17:16:32,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-07T17:16:32,658 INFO [Thread-1226 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-07T17:16:32,660 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:32,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-11-07T17:16:32,662 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:32,663 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:32,663 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:32,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-07T17:16:32,674 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/883ec416cd41497e871427e68aa9a473 2024-11-07T17:16:32,686 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/d4ef0b6f8189427aa1aa7a59a480967c is 50, key is test_row_0/B:col10/1730999792116/Put/seqid=0 2024-11-07T17:16:32,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742161_1337 (size=12301) 2024-11-07T17:16:32,719 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=349 (bloomFilter=true), 
to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/d4ef0b6f8189427aa1aa7a59a480967c 2024-11-07T17:16:32,729 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/15254af0d7394574bae70e4055d727ff is 50, key is test_row_0/C:col10/1730999792116/Put/seqid=0 2024-11-07T17:16:32,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742162_1338 (size=12301) 2024-11-07T17:16:32,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/15254af0d7394574bae70e4055d727ff 2024-11-07T17:16:32,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/883ec416cd41497e871427e68aa9a473 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/883ec416cd41497e871427e68aa9a473 2024-11-07T17:16:32,746 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/883ec416cd41497e871427e68aa9a473, entries=200, sequenceid=349, filesize=14.4 K 2024-11-07T17:16:32,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/d4ef0b6f8189427aa1aa7a59a480967c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/d4ef0b6f8189427aa1aa7a59a480967c 2024-11-07T17:16:32,753 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/d4ef0b6f8189427aa1aa7a59a480967c, entries=150, sequenceid=349, filesize=12.0 K 2024-11-07T17:16:32,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/15254af0d7394574bae70e4055d727ff as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/15254af0d7394574bae70e4055d727ff 2024-11-07T17:16:32,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-07T17:16:32,766 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/15254af0d7394574bae70e4055d727ff, entries=150, sequenceid=349, filesize=12.0 K 2024-11-07T17:16:32,767 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 9c87afdeea8af3233cd3eafc720d61a6 in 536ms, sequenceid=349, compaction requested=false 2024-11-07T17:16:32,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:32,818 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,818 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-07T17:16:32,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:32,818 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-07T17:16:32,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:32,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:32,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:32,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:32,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:32,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:32,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/44b5bdb4eb14479b93fc3cf58ec56e24 is 50, key is test_row_0/A:col10/1730999792246/Put/seqid=0 2024-11-07T17:16:32,831 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/e92f9c8b2e7a4f62944df02eeef9b9b4 as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/e92f9c8b2e7a4f62944df02eeef9b9b4 2024-11-07T17:16:32,838 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/B of 9c87afdeea8af3233cd3eafc720d61a6 into e92f9c8b2e7a4f62944df02eeef9b9b4(size=12.7 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:32,838 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:32,838 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/B, priority=13, startTime=1730999792188; duration=0sec 2024-11-07T17:16:32,838 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:32,838 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:B 2024-11-07T17:16:32,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742163_1339 (size=12301) 2024-11-07T17:16:32,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:32,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. as already flushing 2024-11-07T17:16:32,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999852916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999852916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999852917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999852918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:32,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999852920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:32,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-07T17:16:33,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:33,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999853021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:33,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:33,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999853022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:33,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:33,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999853022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:33,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:33,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999853022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:33,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:33,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999853024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:33,064 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/b5ae047995a74abba7950461adb8daa5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/b5ae047995a74abba7950461adb8daa5 2024-11-07T17:16:33,071 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9c87afdeea8af3233cd3eafc720d61a6/C of 9c87afdeea8af3233cd3eafc720d61a6 into b5ae047995a74abba7950461adb8daa5(size=12.7 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:16:33,071 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:33,071 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6., storeName=9c87afdeea8af3233cd3eafc720d61a6/C, priority=13, startTime=1730999792188; duration=0sec 2024-11-07T17:16:33,071 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:33,072 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:C 2024-11-07T17:16:33,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:33,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999853225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:33,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:33,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999853226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:33,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:33,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999853226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:33,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:33,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999853226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:33,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:33,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999853228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:33,240 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/44b5bdb4eb14479b93fc3cf58ec56e24 2024-11-07T17:16:33,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/d58a1cef976241ab8a71735fdaf522b2 is 50, key is test_row_0/B:col10/1730999792246/Put/seqid=0 2024-11-07T17:16:33,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-07T17:16:33,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742164_1340 (size=12301) 2024-11-07T17:16:33,319 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/d58a1cef976241ab8a71735fdaf522b2 2024-11-07T17:16:33,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/0694d81003dc43c2988f00ba2963591f is 50, key is test_row_0/C:col10/1730999792246/Put/seqid=0 2024-11-07T17:16:33,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742165_1341 (size=12301) 2024-11-07T17:16:33,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:33,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41412 deadline: 1730999853531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:33,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:33,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41366 deadline: 1730999853531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:33,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:33,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:33,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41410 deadline: 1730999853531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:33,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41390 deadline: 1730999853531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:33,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:33,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41418 deadline: 1730999853532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:33,697 DEBUG [Thread-1235 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3bf0ba59 to 127.0.0.1:64938 2024-11-07T17:16:33,697 DEBUG [Thread-1235 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:33,698 DEBUG [Thread-1229 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e998dd3 to 127.0.0.1:64938 2024-11-07T17:16:33,698 DEBUG [Thread-1229 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:33,698 DEBUG [Thread-1231 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e4c79b8 to 127.0.0.1:64938 2024-11-07T17:16:33,699 DEBUG [Thread-1231 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:33,700 DEBUG [Thread-1233 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d1403c3 to 127.0.0.1:64938 2024-11-07T17:16:33,700 DEBUG [Thread-1233 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:33,703 DEBUG [Thread-1227 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x088aa519 to 127.0.0.1:64938 2024-11-07T17:16:33,703 DEBUG [Thread-1227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:33,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-07T17:16:33,795 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/0694d81003dc43c2988f00ba2963591f 2024-11-07T17:16:33,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/44b5bdb4eb14479b93fc3cf58ec56e24 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/44b5bdb4eb14479b93fc3cf58ec56e24 2024-11-07T17:16:33,812 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/44b5bdb4eb14479b93fc3cf58ec56e24, entries=150, sequenceid=368, filesize=12.0 K 2024-11-07T17:16:33,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/d58a1cef976241ab8a71735fdaf522b2 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/d58a1cef976241ab8a71735fdaf522b2 2024-11-07T17:16:33,820 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/d58a1cef976241ab8a71735fdaf522b2, entries=150, sequenceid=368, filesize=12.0 K 2024-11-07T17:16:33,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/0694d81003dc43c2988f00ba2963591f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/0694d81003dc43c2988f00ba2963591f 2024-11-07T17:16:33,827 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/0694d81003dc43c2988f00ba2963591f, entries=150, sequenceid=368, filesize=12.0 K 2024-11-07T17:16:33,828 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 9c87afdeea8af3233cd3eafc720d61a6 in 1010ms, sequenceid=368, compaction requested=true 2024-11-07T17:16:33,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:33,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
2024-11-07T17:16:33,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-11-07T17:16:33,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-11-07T17:16:33,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-07T17:16:33,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1670 sec 2024-11-07T17:16:33,833 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 1.1720 sec 2024-11-07T17:16:34,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:34,042 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-07T17:16:34,043 DEBUG [Thread-1216 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64ee0130 to 127.0.0.1:64938 2024-11-07T17:16:34,043 DEBUG [Thread-1216 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:34,043 DEBUG [Thread-1224 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14ed1e44 to 127.0.0.1:64938 2024-11-07T17:16:34,043 DEBUG [Thread-1224 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:34,044 DEBUG [Thread-1220 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x07e55eb7 to 127.0.0.1:64938 2024-11-07T17:16:34,044 DEBUG [Thread-1220 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:34,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:34,045 DEBUG [Thread-1222 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x03a703d2 to 127.0.0.1:64938 2024-11-07T17:16:34,045 DEBUG [Thread-1222 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:34,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:34,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:34,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:34,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:34,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:34,048 DEBUG [Thread-1218 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683b64c3 to 127.0.0.1:64938 2024-11-07T17:16:34,048 DEBUG [Thread-1218 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:34,053 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/525d67dbb0564c72bde9b00b74e96d89 is 50, key is test_row_0/A:col10/1730999794037/Put/seqid=0 2024-11-07T17:16:34,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742166_1342 (size=12301) 2024-11-07T17:16:34,477 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/525d67dbb0564c72bde9b00b74e96d89 2024-11-07T17:16:34,484 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/d0813e709a0b46d9b994e71a56090864 is 50, key is test_row_0/B:col10/1730999794037/Put/seqid=0 2024-11-07T17:16:34,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742167_1343 (size=12301) 2024-11-07T17:16:34,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-07T17:16:34,770 INFO [Thread-1226 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-07T17:16:34,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-07T17:16:34,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 66 2024-11-07T17:16:34,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-11-07T17:16:34,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-11-07T17:16:34,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-11-07T17:16:34,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-11-07T17:16:34,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-07T17:16:34,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6655 2024-11-07T17:16:34,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6491 2024-11-07T17:16:34,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6523 2024-11-07T17:16:34,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6678 2024-11-07T17:16:34,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6478 2024-11-07T17:16:34,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-07T17:16:34,770 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-07T17:16:34,770 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x44645c55 to 127.0.0.1:64938 2024-11-07T17:16:34,770 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:16:34,771 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-07T17:16:34,771 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable 
TestAcidGuarantees 2024-11-07T17:16:34,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:34,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-07T17:16:34,774 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999794774"}]},"ts":"1730999794774"} 2024-11-07T17:16:34,775 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-07T17:16:34,778 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-07T17:16:34,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T17:16:34,779 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9c87afdeea8af3233cd3eafc720d61a6, UNASSIGN}] 2024-11-07T17:16:34,780 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9c87afdeea8af3233cd3eafc720d61a6, UNASSIGN 2024-11-07T17:16:34,780 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=9c87afdeea8af3233cd3eafc720d61a6, regionState=CLOSING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:34,781 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T17:16:34,781 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure 9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:16:34,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-07T17:16:34,889 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/d0813e709a0b46d9b994e71a56090864 2024-11-07T17:16:34,896 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/e1c08a334f1444d8b8d2c74c424455d8 is 50, key is test_row_0/C:col10/1730999794037/Put/seqid=0 2024-11-07T17:16:34,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742168_1344 (size=12301) 2024-11-07T17:16:34,932 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:34,933 INFO 
[RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:34,933 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T17:16:34,933 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing 9c87afdeea8af3233cd3eafc720d61a6, disabling compactions & flushes 2024-11-07T17:16:34,933 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:35,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-07T17:16:35,303 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/e1c08a334f1444d8b8d2c74c424455d8 2024-11-07T17:16:35,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/525d67dbb0564c72bde9b00b74e96d89 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/525d67dbb0564c72bde9b00b74e96d89 2024-11-07T17:16:35,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/525d67dbb0564c72bde9b00b74e96d89, entries=150, sequenceid=393, filesize=12.0 K 2024-11-07T17:16:35,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/d0813e709a0b46d9b994e71a56090864 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/d0813e709a0b46d9b994e71a56090864 2024-11-07T17:16:35,313 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/d0813e709a0b46d9b994e71a56090864, entries=150, sequenceid=393, filesize=12.0 K 2024-11-07T17:16:35,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/e1c08a334f1444d8b8d2c74c424455d8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/e1c08a334f1444d8b8d2c74c424455d8 2024-11-07T17:16:35,316 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/e1c08a334f1444d8b8d2c74c424455d8, entries=150, sequenceid=393, filesize=12.0 K 2024-11-07T17:16:35,317 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=6.71 KB/6870 for 9c87afdeea8af3233cd3eafc720d61a6 in 1275ms, sequenceid=393, compaction requested=true 2024-11-07T17:16:35,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:35,317 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:35,317 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:35,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:16:35,317 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. because compaction request was cancelled 2024-11-07T17:16:35,317 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. after waiting 0 ms 2024-11-07T17:16:35,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:35,317 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:A 2024-11-07T17:16:35,317 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 2024-11-07T17:16:35,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:35,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:35,317 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
because compaction request was cancelled 2024-11-07T17:16:35,317 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:B 2024-11-07T17:16:35,317 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. because compaction request was cancelled 2024-11-07T17:16:35,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c87afdeea8af3233cd3eafc720d61a6:C, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:35,317 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(2837): Flushing 9c87afdeea8af3233cd3eafc720d61a6 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-11-07T17:16:35,317 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c87afdeea8af3233cd3eafc720d61a6:C 2024-11-07T17:16:35,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:35,318 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=A 2024-11-07T17:16:35,318 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:35,318 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=B 2024-11-07T17:16:35,318 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:35,318 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9c87afdeea8af3233cd3eafc720d61a6, store=C 2024-11-07T17:16:35,318 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:35,321 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/df11a1dd3d254d6cac15971e832bc2d1 is 50, key is test_row_0/A:col10/1730999794046/Put/seqid=0 2024-11-07T17:16:35,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742169_1345 (size=7415) 2024-11-07T17:16:35,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-07T17:16:35,724 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=2.24 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/df11a1dd3d254d6cac15971e832bc2d1 2024-11-07T17:16:35,731 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/fca384905cb64486b789ac7e0be7f530 is 50, key is test_row_0/B:col10/1730999794046/Put/seqid=0 2024-11-07T17:16:35,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742170_1346 (size=7415) 2024-11-07T17:16:35,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-07T17:16:36,135 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/fca384905cb64486b789ac7e0be7f530 2024-11-07T17:16:36,141 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/8b7b8503f23141efa02c5988e4458726 is 50, key is test_row_0/C:col10/1730999794046/Put/seqid=0 2024-11-07T17:16:36,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742171_1347 (size=7415) 2024-11-07T17:16:36,545 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/8b7b8503f23141efa02c5988e4458726 2024-11-07T17:16:36,548 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/A/df11a1dd3d254d6cac15971e832bc2d1 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/df11a1dd3d254d6cac15971e832bc2d1 2024-11-07T17:16:36,551 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/df11a1dd3d254d6cac15971e832bc2d1, entries=50, sequenceid=397, filesize=7.2 K 2024-11-07T17:16:36,552 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/B/fca384905cb64486b789ac7e0be7f530 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/fca384905cb64486b789ac7e0be7f530 2024-11-07T17:16:36,555 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/fca384905cb64486b789ac7e0be7f530, entries=50, sequenceid=397, filesize=7.2 K 2024-11-07T17:16:36,555 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/.tmp/C/8b7b8503f23141efa02c5988e4458726 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/8b7b8503f23141efa02c5988e4458726 2024-11-07T17:16:36,558 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/8b7b8503f23141efa02c5988e4458726, entries=50, sequenceid=397, filesize=7.2 K 2024-11-07T17:16:36,559 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=0 B/0 for 9c87afdeea8af3233cd3eafc720d61a6 in 1242ms, sequenceid=397, compaction requested=true 2024-11-07T17:16:36,559 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4493a26b8e3e4c47b5973b98fe94bdf7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/84bef5016e554cffb5c914aa153fa625, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2a61f1c7b58c4de9be6dd4f80e91a2f9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/c08233e8a87f47058d0f8054dd1a3144, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/d9505e1549ad42f78bf9c88139fba59b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/0133bb07e9cb4a01b161b1f02137a87f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2ace928bd4034989986e4935c50778ab, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4b05aa30f3114374a59d6eaa782c7da0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/0d2522ac2a6c425b940caa07d863944f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4fd1160f16854e218e441c9540d8db69, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/cfacc44c6c3a48fea867e45ea79ea33e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/278c1f0eebb741a08fe9dfc62d814108, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/f7515d7a539a43a29b492e34230f0ff0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/f5df15f99d25450c9518480fd225a7b5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/396a914d12a640ca98b164d47bff9f1d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2491ac629b4142bf92c81a87932bb76f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/109f8dd9e2f24724a81cb3d8140f944d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/8410946827dc494d8f29ded45ddb029d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/39f7dac4f64e477fbe4c1566835859b9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/92eb3eca088e419886b0966601b157e4, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/d5005c693c4340328964ef37cbb52864, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/a1de2a6f4f1b41a2bc08cafe543816e2] to archive 2024-11-07T17:16:36,560 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T17:16:36,562 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4493a26b8e3e4c47b5973b98fe94bdf7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4493a26b8e3e4c47b5973b98fe94bdf7 2024-11-07T17:16:36,563 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/84bef5016e554cffb5c914aa153fa625 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/84bef5016e554cffb5c914aa153fa625 2024-11-07T17:16:36,564 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2a61f1c7b58c4de9be6dd4f80e91a2f9 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2a61f1c7b58c4de9be6dd4f80e91a2f9 2024-11-07T17:16:36,565 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/c08233e8a87f47058d0f8054dd1a3144 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/c08233e8a87f47058d0f8054dd1a3144 2024-11-07T17:16:36,566 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/d9505e1549ad42f78bf9c88139fba59b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/d9505e1549ad42f78bf9c88139fba59b 2024-11-07T17:16:36,566 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/0133bb07e9cb4a01b161b1f02137a87f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/0133bb07e9cb4a01b161b1f02137a87f 2024-11-07T17:16:36,567 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2ace928bd4034989986e4935c50778ab to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2ace928bd4034989986e4935c50778ab 2024-11-07T17:16:36,568 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4b05aa30f3114374a59d6eaa782c7da0 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4b05aa30f3114374a59d6eaa782c7da0 2024-11-07T17:16:36,569 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/0d2522ac2a6c425b940caa07d863944f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/0d2522ac2a6c425b940caa07d863944f 2024-11-07T17:16:36,570 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4fd1160f16854e218e441c9540d8db69 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/4fd1160f16854e218e441c9540d8db69 2024-11-07T17:16:36,571 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/cfacc44c6c3a48fea867e45ea79ea33e to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/cfacc44c6c3a48fea867e45ea79ea33e 2024-11-07T17:16:36,571 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/278c1f0eebb741a08fe9dfc62d814108 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/278c1f0eebb741a08fe9dfc62d814108 2024-11-07T17:16:36,572 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/f7515d7a539a43a29b492e34230f0ff0 to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/f7515d7a539a43a29b492e34230f0ff0 2024-11-07T17:16:36,573 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/f5df15f99d25450c9518480fd225a7b5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/f5df15f99d25450c9518480fd225a7b5 2024-11-07T17:16:36,574 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/396a914d12a640ca98b164d47bff9f1d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/396a914d12a640ca98b164d47bff9f1d 2024-11-07T17:16:36,575 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2491ac629b4142bf92c81a87932bb76f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/2491ac629b4142bf92c81a87932bb76f 2024-11-07T17:16:36,576 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/109f8dd9e2f24724a81cb3d8140f944d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/109f8dd9e2f24724a81cb3d8140f944d 2024-11-07T17:16:36,577 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/8410946827dc494d8f29ded45ddb029d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/8410946827dc494d8f29ded45ddb029d 2024-11-07T17:16:36,578 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/39f7dac4f64e477fbe4c1566835859b9 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/39f7dac4f64e477fbe4c1566835859b9 2024-11-07T17:16:36,578 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/92eb3eca088e419886b0966601b157e4 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/92eb3eca088e419886b0966601b157e4 2024-11-07T17:16:36,579 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/d5005c693c4340328964ef37cbb52864 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/d5005c693c4340328964ef37cbb52864 2024-11-07T17:16:36,580 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/a1de2a6f4f1b41a2bc08cafe543816e2 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/a1de2a6f4f1b41a2bc08cafe543816e2 2024-11-07T17:16:36,581 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a3d5cec1e1d54196bb7842ac2dd57344, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/edeac1a7b2f94e76b10a7bdccbda3abc, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/121f765cdca747508ae978428ceab6a6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/670e4762aef54ce0bfbc608e4a84a059, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/cca826f9ea514011a01d87662c0f0e57, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/52d6aa35cb5f409b8ff9172888da3567, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/194d4bbd3adc42b1b9a78a0666056ed6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/74220a5a11bb48369bff4d161ee06ead, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/9b0879db2890468ab1cded4b48544b43, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a92e5f57b35d439fb85009c7bc1244ea, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/25ec7eb2ade24f868f24e6e5be0d71d5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/56c975f3dc1444f8b82af66633e23a57, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a85a7b63cb8848b98c7c471443af9631, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/5f3fe90544c643d385d17c8745db9098, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/83ad8a529f6940138e45b26064f16a93, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/f222f2ecbb6e4bd6aaceddfe1aafa6f4, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/7c1cfb4238f64bc9a8da63c22a46160a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/569fda3d046c47c9b6fbdc73ea210c1c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/c10dc60e8f5e4221a36464d827a6f466, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/842139c293e0468ea344ac7e24536a48, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/dc199197f5d64517be97d4b010b839cc, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/9f8459915b1a4798b9a2d361b6a97155] to archive 2024-11-07T17:16:36,582 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T17:16:36,583 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a3d5cec1e1d54196bb7842ac2dd57344 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a3d5cec1e1d54196bb7842ac2dd57344 2024-11-07T17:16:36,584 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/edeac1a7b2f94e76b10a7bdccbda3abc to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/edeac1a7b2f94e76b10a7bdccbda3abc 2024-11-07T17:16:36,585 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/121f765cdca747508ae978428ceab6a6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/121f765cdca747508ae978428ceab6a6 2024-11-07T17:16:36,586 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/670e4762aef54ce0bfbc608e4a84a059 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/670e4762aef54ce0bfbc608e4a84a059 2024-11-07T17:16:36,586 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/cca826f9ea514011a01d87662c0f0e57 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/cca826f9ea514011a01d87662c0f0e57 2024-11-07T17:16:36,587 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/52d6aa35cb5f409b8ff9172888da3567 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/52d6aa35cb5f409b8ff9172888da3567 2024-11-07T17:16:36,588 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/194d4bbd3adc42b1b9a78a0666056ed6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/194d4bbd3adc42b1b9a78a0666056ed6 2024-11-07T17:16:36,589 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/74220a5a11bb48369bff4d161ee06ead to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/74220a5a11bb48369bff4d161ee06ead 2024-11-07T17:16:36,589 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/9b0879db2890468ab1cded4b48544b43 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/9b0879db2890468ab1cded4b48544b43 2024-11-07T17:16:36,590 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a92e5f57b35d439fb85009c7bc1244ea to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a92e5f57b35d439fb85009c7bc1244ea 2024-11-07T17:16:36,591 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/25ec7eb2ade24f868f24e6e5be0d71d5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/25ec7eb2ade24f868f24e6e5be0d71d5 2024-11-07T17:16:36,592 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/56c975f3dc1444f8b82af66633e23a57 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/56c975f3dc1444f8b82af66633e23a57 2024-11-07T17:16:36,593 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a85a7b63cb8848b98c7c471443af9631 to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/a85a7b63cb8848b98c7c471443af9631 2024-11-07T17:16:36,594 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/5f3fe90544c643d385d17c8745db9098 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/5f3fe90544c643d385d17c8745db9098 2024-11-07T17:16:36,595 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/83ad8a529f6940138e45b26064f16a93 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/83ad8a529f6940138e45b26064f16a93 2024-11-07T17:16:36,595 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/f222f2ecbb6e4bd6aaceddfe1aafa6f4 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/f222f2ecbb6e4bd6aaceddfe1aafa6f4 2024-11-07T17:16:36,596 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/7c1cfb4238f64bc9a8da63c22a46160a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/7c1cfb4238f64bc9a8da63c22a46160a 2024-11-07T17:16:36,597 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/569fda3d046c47c9b6fbdc73ea210c1c to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/569fda3d046c47c9b6fbdc73ea210c1c 2024-11-07T17:16:36,598 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/c10dc60e8f5e4221a36464d827a6f466 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/c10dc60e8f5e4221a36464d827a6f466 2024-11-07T17:16:36,599 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/842139c293e0468ea344ac7e24536a48 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/842139c293e0468ea344ac7e24536a48 2024-11-07T17:16:36,600 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/dc199197f5d64517be97d4b010b839cc to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/dc199197f5d64517be97d4b010b839cc 2024-11-07T17:16:36,601 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/9f8459915b1a4798b9a2d361b6a97155 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/9f8459915b1a4798b9a2d361b6a97155 2024-11-07T17:16:36,602 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/94fad211cd2e43d8b5f5074371a6ecad, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/9c71c4167e274840a6b50914434c5c32, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/aa8e005ee65a4f729b4dfabdd5bdba1b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/c80be1d714c747f986993d1cfaabcfe4, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/2d270d77430d40d781ed75d74e0a71d8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/0a9dd42366144d1fa88b0d0cfbeca3fc, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/9f31135fbd234a0980023daeaa0b7f88, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/dfe3d34e71a34a70b9e8d0134ff8a6bb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/eba3a9abacac48d1aeb51bee8fadd0a5, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/6b1161e5ac1347d8a5540bfa50e95f05, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/829607ce4c7e49a3ac830b2e7ab97094, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/b8e70d7a73604936bdfc0ef589582ed0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/69a5afd5e6f240e2bd61d62581c24fe8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/3138abf26d0a4eabba220d50a84be059, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/a07ac562e4f54574bbf3622d759c1e1b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/192411e99fe0476b977ca1175c97e4e4, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/8e09f5c129f349ada4f59872142958a8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/2087aabb1d8e4990b70173386723fb5e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/f0802187b501468a82741c9611e6aecb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/0c7b072577b547e8b24626f73ad333fd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/de674a09c8634fc4baba600dc731aeb7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/d768c91ecbba4ab7a547bb0bbf0bf667] to archive 2024-11-07T17:16:36,602 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T17:16:36,604 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/94fad211cd2e43d8b5f5074371a6ecad to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/94fad211cd2e43d8b5f5074371a6ecad 2024-11-07T17:16:36,605 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/9c71c4167e274840a6b50914434c5c32 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/9c71c4167e274840a6b50914434c5c32 2024-11-07T17:16:36,606 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/aa8e005ee65a4f729b4dfabdd5bdba1b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/aa8e005ee65a4f729b4dfabdd5bdba1b 2024-11-07T17:16:36,606 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/c80be1d714c747f986993d1cfaabcfe4 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/c80be1d714c747f986993d1cfaabcfe4 2024-11-07T17:16:36,607 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/2d270d77430d40d781ed75d74e0a71d8 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/2d270d77430d40d781ed75d74e0a71d8 2024-11-07T17:16:36,608 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/0a9dd42366144d1fa88b0d0cfbeca3fc to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/0a9dd42366144d1fa88b0d0cfbeca3fc 2024-11-07T17:16:36,609 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/9f31135fbd234a0980023daeaa0b7f88 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/9f31135fbd234a0980023daeaa0b7f88 2024-11-07T17:16:36,610 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/dfe3d34e71a34a70b9e8d0134ff8a6bb to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/dfe3d34e71a34a70b9e8d0134ff8a6bb 2024-11-07T17:16:36,611 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/eba3a9abacac48d1aeb51bee8fadd0a5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/eba3a9abacac48d1aeb51bee8fadd0a5 2024-11-07T17:16:36,612 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/6b1161e5ac1347d8a5540bfa50e95f05 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/6b1161e5ac1347d8a5540bfa50e95f05 2024-11-07T17:16:36,613 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/829607ce4c7e49a3ac830b2e7ab97094 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/829607ce4c7e49a3ac830b2e7ab97094 2024-11-07T17:16:36,614 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/b8e70d7a73604936bdfc0ef589582ed0 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/b8e70d7a73604936bdfc0ef589582ed0 2024-11-07T17:16:36,614 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/69a5afd5e6f240e2bd61d62581c24fe8 to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/69a5afd5e6f240e2bd61d62581c24fe8 2024-11-07T17:16:36,615 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/3138abf26d0a4eabba220d50a84be059 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/3138abf26d0a4eabba220d50a84be059 2024-11-07T17:16:36,616 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/a07ac562e4f54574bbf3622d759c1e1b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/a07ac562e4f54574bbf3622d759c1e1b 2024-11-07T17:16:36,617 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/192411e99fe0476b977ca1175c97e4e4 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/192411e99fe0476b977ca1175c97e4e4 2024-11-07T17:16:36,618 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/8e09f5c129f349ada4f59872142958a8 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/8e09f5c129f349ada4f59872142958a8 2024-11-07T17:16:36,619 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/2087aabb1d8e4990b70173386723fb5e to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/2087aabb1d8e4990b70173386723fb5e 2024-11-07T17:16:36,619 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/f0802187b501468a82741c9611e6aecb to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/f0802187b501468a82741c9611e6aecb 2024-11-07T17:16:36,620 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/0c7b072577b547e8b24626f73ad333fd to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/0c7b072577b547e8b24626f73ad333fd 2024-11-07T17:16:36,621 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/de674a09c8634fc4baba600dc731aeb7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/de674a09c8634fc4baba600dc731aeb7 2024-11-07T17:16:36,622 DEBUG [StoreCloser-TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/d768c91ecbba4ab7a547bb0bbf0bf667 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/d768c91ecbba4ab7a547bb0bbf0bf667 2024-11-07T17:16:36,625 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/recovered.edits/400.seqid, newMaxSeqId=400, maxSeqId=1 2024-11-07T17:16:36,626 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6. 
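[Editor's note: illustrative sketch, not part of the captured log. Each "Archived from FileableStoreFile, <src> to <dst>" line above moves a store file from the region's data directory to the mirrored path under archive/, preserving the namespace/table/region/family layout. The snippet below shows that move in plain Hadoop FileSystem calls as a simplified stand-in; the real backup.HFileArchiver additionally handles name collisions, retries, and cleanup. The class and method names here are hypothetical.]

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ArchiveSketch {
  /** Moves one store file from the data area into the archive area, keeping the layout. */
  static void archiveStoreFile(FileSystem fs, Path rootDir, String relative) throws IOException {
    // e.g. relative = "default/TestAcidGuarantees/<region>/B/<hfile>"
    Path src = new Path(rootDir, "data/" + relative);
    Path dst = new Path(rootDir, "archive/data/" + relative);
    fs.mkdirs(dst.getParent());                         // ensure the archive directory exists
    if (!fs.rename(src, dst)) {                         // move, do not copy, the HFile
      throw new IOException("Failed to archive " + src + " to " + dst);
    }
  }
}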
2024-11-07T17:16:36,626 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for 9c87afdeea8af3233cd3eafc720d61a6: 2024-11-07T17:16:36,627 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed 9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:36,627 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=9c87afdeea8af3233cd3eafc720d61a6, regionState=CLOSED 2024-11-07T17:16:36,629 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-07T17:16:36,629 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure 9c87afdeea8af3233cd3eafc720d61a6, server=3a0fde618c86,37403,1730999712734 in 1.8470 sec 2024-11-07T17:16:36,630 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-11-07T17:16:36,630 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9c87afdeea8af3233cd3eafc720d61a6, UNASSIGN in 1.8500 sec 2024-11-07T17:16:36,631 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-07T17:16:36,631 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8520 sec 2024-11-07T17:16:36,632 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999796632"}]},"ts":"1730999796632"} 2024-11-07T17:16:36,632 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-07T17:16:36,634 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-07T17:16:36,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8640 sec 2024-11-07T17:16:36,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-07T17:16:36,878 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-11-07T17:16:36,879 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-07T17:16:36,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:36,880 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:36,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-07T17:16:36,880 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=97, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:36,883 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:36,884 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/recovered.edits] 2024-11-07T17:16:36,887 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/44b5bdb4eb14479b93fc3cf58ec56e24 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/44b5bdb4eb14479b93fc3cf58ec56e24 2024-11-07T17:16:36,888 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/525d67dbb0564c72bde9b00b74e96d89 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/525d67dbb0564c72bde9b00b74e96d89 2024-11-07T17:16:36,888 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/883ec416cd41497e871427e68aa9a473 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/883ec416cd41497e871427e68aa9a473 2024-11-07T17:16:36,889 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/df11a1dd3d254d6cac15971e832bc2d1 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/df11a1dd3d254d6cac15971e832bc2d1 2024-11-07T17:16:36,891 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/e925f984810049da86e402b97fb41cec to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/A/e925f984810049da86e402b97fb41cec 2024-11-07T17:16:36,892 
DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/d0813e709a0b46d9b994e71a56090864 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/d0813e709a0b46d9b994e71a56090864 2024-11-07T17:16:36,893 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/d4ef0b6f8189427aa1aa7a59a480967c to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/d4ef0b6f8189427aa1aa7a59a480967c 2024-11-07T17:16:36,894 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/d58a1cef976241ab8a71735fdaf522b2 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/d58a1cef976241ab8a71735fdaf522b2 2024-11-07T17:16:36,895 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/e92f9c8b2e7a4f62944df02eeef9b9b4 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/e92f9c8b2e7a4f62944df02eeef9b9b4 2024-11-07T17:16:36,896 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/fca384905cb64486b789ac7e0be7f530 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/B/fca384905cb64486b789ac7e0be7f530 2024-11-07T17:16:36,898 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/0694d81003dc43c2988f00ba2963591f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/0694d81003dc43c2988f00ba2963591f 2024-11-07T17:16:36,898 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/15254af0d7394574bae70e4055d727ff to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/15254af0d7394574bae70e4055d727ff 2024-11-07T17:16:36,899 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/8b7b8503f23141efa02c5988e4458726 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/8b7b8503f23141efa02c5988e4458726 2024-11-07T17:16:36,900 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/b5ae047995a74abba7950461adb8daa5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/b5ae047995a74abba7950461adb8daa5 2024-11-07T17:16:36,901 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/e1c08a334f1444d8b8d2c74c424455d8 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/C/e1c08a334f1444d8b8d2c74c424455d8 2024-11-07T17:16:36,904 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/recovered.edits/400.seqid to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6/recovered.edits/400.seqid 2024-11-07T17:16:36,904 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/9c87afdeea8af3233cd3eafc720d61a6 2024-11-07T17:16:36,904 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-07T17:16:36,906 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=97, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:36,910 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-07T17:16:36,912 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-07T17:16:36,912 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=97, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:36,912 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-07T17:16:36,913 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730999796913"}]},"ts":"9223372036854775807"} 2024-11-07T17:16:36,914 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-07T17:16:36,914 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 9c87afdeea8af3233cd3eafc720d61a6, NAME => 'TestAcidGuarantees,,1730999771538.9c87afdeea8af3233cd3eafc720d61a6.', STARTKEY => '', ENDKEY => ''}] 2024-11-07T17:16:36,914 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-07T17:16:36,914 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730999796914"}]},"ts":"9223372036854775807"} 2024-11-07T17:16:36,916 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-07T17:16:36,918 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=97, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:36,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 39 msec 2024-11-07T17:16:36,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-07T17:16:36,981 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 97 completed 2024-11-07T17:16:36,991 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=238 (was 242), OpenFileDescriptor=453 (was 462), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=473 (was 407) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2762 (was 2925) 2024-11-07T17:16:36,999 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=238, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=473, ProcessCount=11, AvailableMemoryMB=2761 2024-11-07T17:16:37,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
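[Editor's note: illustrative sketch, not part of the captured log. The DISABLE (procId 93) and DELETE (procId 97) operations recorded above are driven by the test client through the HBase Admin interface; the snippet below shows the equivalent calls using the standard HBase 2.x client API. Connection setup and the class name are assumptions added for illustration.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();        // picks up hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn);   // corresponds to the DisableTableProcedure (pid=93) above
        }
        admin.deleteTable(tn);      // corresponds to the DeleteTableProcedure (pid=97) above
      }
    }
  }
}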
2024-11-07T17:16:37,001 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T17:16:37,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:37,002 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T17:16:37,002 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:37,002 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 98 2024-11-07T17:16:37,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-07T17:16:37,003 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T17:16:37,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742172_1348 (size=960) 2024-11-07T17:16:37,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-07T17:16:37,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-07T17:16:37,410 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
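[Editor's note: illustrative sketch, not part of the captured log. The CREATE request logged above specifies three column families A/B/C with VERSIONS => '1' and BLOCKSIZE => '65536', plus the table metadata 'hbase.hregion.compacting.memstore.type' => 'BASIC'. The snippet below builds an equivalent descriptor with the HBase 2.x TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API; attributes not set explicitly are left at their defaults, and the class name is hypothetical.]

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateAcidTable {
  static ColumnFamilyDescriptor family(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setMaxVersions(1)          // VERSIONS => '1'
        .setBlocksize(65536)        // BLOCKSIZE => '65536'
        .build();                   // remaining attributes keep their defaults
  }

  public static void createTable(Admin admin) throws java.io.IOException {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setValue("hbase.hregion.compacting.memstore.type", "BASIC")   // table-level metadata from the log
        .setColumnFamily(family("A"))
        .setColumnFamily(family("B"))
        .setColumnFamily(family("C"))
        .build();
    admin.createTable(td);          // drives the CreateTableProcedure (pid=98) seen above
  }
}

[The later MODIFY request at 17:16:39 adds IS_MOB => 'true' and MOB_THRESHOLD => '4' to family A; under the same API that would correspond to setMobEnabled(true) and setMobThreshold(4L) on the family builder, followed by Admin.modifyTable.]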
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17 2024-11-07T17:16:37,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742173_1349 (size=53) 2024-11-07T17:16:37,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-07T17:16:37,816 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:16:37,816 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 73d52d69bc80a97d9d4aef7a7d44d969, disabling compactions & flushes 2024-11-07T17:16:37,816 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:37,816 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:37,816 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. after waiting 0 ms 2024-11-07T17:16:37,816 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:37,816 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:37,816 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:37,817 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T17:16:37,817 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1730999797817"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730999797817"}]},"ts":"1730999797817"} 2024-11-07T17:16:37,818 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-07T17:16:37,818 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T17:16:37,819 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999797818"}]},"ts":"1730999797818"} 2024-11-07T17:16:37,819 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-07T17:16:37,824 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=73d52d69bc80a97d9d4aef7a7d44d969, ASSIGN}] 2024-11-07T17:16:37,825 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=73d52d69bc80a97d9d4aef7a7d44d969, ASSIGN 2024-11-07T17:16:37,825 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=73d52d69bc80a97d9d4aef7a7d44d969, ASSIGN; state=OFFLINE, location=3a0fde618c86,37403,1730999712734; forceNewPlan=false, retain=false 2024-11-07T17:16:37,976 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=73d52d69bc80a97d9d4aef7a7d44d969, regionState=OPENING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:37,977 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; OpenRegionProcedure 73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:16:38,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-07T17:16:38,128 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:38,131 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:38,131 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7285): Opening region: {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} 2024-11-07T17:16:38,131 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:38,131 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:16:38,131 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7327): checking encryption for 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:38,131 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7330): checking classloading for 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:38,132 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:38,134 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:16:38,134 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73d52d69bc80a97d9d4aef7a7d44d969 columnFamilyName A 2024-11-07T17:16:38,134 DEBUG [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:38,134 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.HStore(327): Store=73d52d69bc80a97d9d4aef7a7d44d969/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:16:38,134 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:38,135 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:16:38,135 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73d52d69bc80a97d9d4aef7a7d44d969 columnFamilyName B 2024-11-07T17:16:38,136 DEBUG [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:38,136 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.HStore(327): Store=73d52d69bc80a97d9d4aef7a7d44d969/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:16:38,136 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:38,137 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:16:38,137 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73d52d69bc80a97d9d4aef7a7d44d969 columnFamilyName C 2024-11-07T17:16:38,137 DEBUG [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:38,137 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.HStore(327): Store=73d52d69bc80a97d9d4aef7a7d44d969/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:16:38,138 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:38,138 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:38,138 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:38,140 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T17:16:38,141 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1085): writing seq id for 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:38,143 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T17:16:38,143 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1102): Opened 73d52d69bc80a97d9d4aef7a7d44d969; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61146905, jitterRate=-0.08884011209011078}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T17:16:38,144 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1001): Region open journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:38,145 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., pid=100, masterSystemTime=1730999798128 2024-11-07T17:16:38,147 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:38,147 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:38,147 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=73d52d69bc80a97d9d4aef7a7d44d969, regionState=OPEN, openSeqNum=2, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:38,150 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-07T17:16:38,150 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; OpenRegionProcedure 73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 in 171 msec 2024-11-07T17:16:38,151 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-11-07T17:16:38,151 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=73d52d69bc80a97d9d4aef7a7d44d969, ASSIGN in 326 msec 2024-11-07T17:16:38,152 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T17:16:38,152 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999798152"}]},"ts":"1730999798152"} 2024-11-07T17:16:38,153 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-07T17:16:38,156 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T17:16:38,158 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1550 sec 2024-11-07T17:16:39,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-07T17:16:39,107 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-11-07T17:16:39,108 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51f7d511 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75b14fbd 2024-11-07T17:16:39,112 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b6cf8cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:39,113 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:39,114 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46322, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:39,115 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T17:16:39,116 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46814, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T17:16:39,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-07T17:16:39,118 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T17:16:39,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=101, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-07T17:16:39,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742174_1350 (size=996) 2024-11-07T17:16:39,528 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-07T17:16:39,528 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-07T17:16:39,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T17:16:39,531 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=73d52d69bc80a97d9d4aef7a7d44d969, REOPEN/MOVE}] 2024-11-07T17:16:39,532 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=73d52d69bc80a97d9d4aef7a7d44d969, REOPEN/MOVE 2024-11-07T17:16:39,532 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=73d52d69bc80a97d9d4aef7a7d44d969, regionState=CLOSING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:39,533 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T17:16:39,533 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE; CloseRegionProcedure 73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:16:39,684 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:39,685 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(124): Close 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:39,685 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T17:16:39,685 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1681): Closing 73d52d69bc80a97d9d4aef7a7d44d969, disabling compactions & flushes 2024-11-07T17:16:39,685 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:39,685 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:39,685 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. after waiting 0 ms 2024-11-07T17:16:39,685 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:39,688 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-07T17:16:39,689 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:39,689 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1635): Region close journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:39,689 WARN [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegionServer(3786): Not adding moved region record: 73d52d69bc80a97d9d4aef7a7d44d969 to self. 2024-11-07T17:16:39,690 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(170): Closed 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:39,691 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=73d52d69bc80a97d9d4aef7a7d44d969, regionState=CLOSED 2024-11-07T17:16:39,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-11-07T17:16:39,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; CloseRegionProcedure 73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 in 158 msec 2024-11-07T17:16:39,693 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=73d52d69bc80a97d9d4aef7a7d44d969, REOPEN/MOVE; state=CLOSED, location=3a0fde618c86,37403,1730999712734; forceNewPlan=false, retain=true 2024-11-07T17:16:39,843 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=73d52d69bc80a97d9d4aef7a7d44d969, regionState=OPENING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:39,844 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=103, state=RUNNABLE; OpenRegionProcedure 73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:16:39,995 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:39,997 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:39,997 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7285): Opening region: {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} 2024-11-07T17:16:39,998 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:39,998 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:16:39,998 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7327): checking encryption for 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:39,998 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7330): checking classloading for 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:39,999 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:40,000 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:16:40,000 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73d52d69bc80a97d9d4aef7a7d44d969 columnFamilyName A 2024-11-07T17:16:40,001 DEBUG [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:40,001 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.HStore(327): Store=73d52d69bc80a97d9d4aef7a7d44d969/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:16:40,002 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:40,002 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:16:40,002 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73d52d69bc80a97d9d4aef7a7d44d969 columnFamilyName B 2024-11-07T17:16:40,002 DEBUG [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:40,003 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.HStore(327): Store=73d52d69bc80a97d9d4aef7a7d44d969/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:16:40,003 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:40,003 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:16:40,003 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73d52d69bc80a97d9d4aef7a7d44d969 columnFamilyName C 2024-11-07T17:16:40,003 DEBUG [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:40,004 INFO [StoreOpener-73d52d69bc80a97d9d4aef7a7d44d969-1 {}] regionserver.HStore(327): Store=73d52d69bc80a97d9d4aef7a7d44d969/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:16:40,004 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:40,004 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:40,005 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:40,006 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T17:16:40,007 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1085): writing seq id for 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:40,008 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1102): Opened 73d52d69bc80a97d9d4aef7a7d44d969; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73898091, jitterRate=0.10116736590862274}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T17:16:40,008 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1001): Region open journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:40,009 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., pid=105, masterSystemTime=1730999799995 2024-11-07T17:16:40,010 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:40,010 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
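The modify-table request logged at 17:16:39,118 changes family A into a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), and the ReopenTableRegions / REOPEN-MOVE procedures above apply it by closing and reopening the region. A minimal sketch of the kind of client call that drives such a ModifyTableProcedure, assuming the standard HBase 2.x Admin API (an illustration, not the test's own code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      TableDescriptor current = admin.getDescriptor(table);
      // Rebuild family 'A' with MOB enabled and a 4-byte threshold, as in the logged descriptor.
      ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
      ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
          .setMobEnabled(true)
          .setMobThreshold(4L)
          .build();
      TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(mobA)
          .build();
      // This call triggers the MODIFY_TABLE procedure and the region reopen seen in the log.
      admin.modifyTable(modified);
    }
  }
}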
2024-11-07T17:16:40,010 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=73d52d69bc80a97d9d4aef7a7d44d969, regionState=OPEN, openSeqNum=5, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,012 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=103 2024-11-07T17:16:40,012 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=103, state=SUCCESS; OpenRegionProcedure 73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 in 167 msec 2024-11-07T17:16:40,013 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-07T17:16:40,013 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=73d52d69bc80a97d9d4aef7a7d44d969, REOPEN/MOVE in 481 msec 2024-11-07T17:16:40,015 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-11-07T17:16:40,015 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 484 msec 2024-11-07T17:16:40,016 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 897 msec 2024-11-07T17:16:40,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-11-07T17:16:40,018 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1dc42ea6 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62f74604 2024-11-07T17:16:40,026 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ec15031, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:40,027 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x117e86d9 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@49e13594 2024-11-07T17:16:40,030 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dd5b441, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:40,031 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6cd96549 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c54a0d3 2024-11-07T17:16:40,034 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c336ea4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:40,034 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x31aea41b to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3875c8c5 2024-11-07T17:16:40,037 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f94d721, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:40,038 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1e247aa1 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@801ba40 2024-11-07T17:16:40,042 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@319559be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:40,043 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x27539bdc to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c907e21 2024-11-07T17:16:40,046 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683f8469, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:40,046 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e3203d9 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61ec0f48 2024-11-07T17:16:40,049 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75e4d3d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:40,049 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x798e7fd4 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7819b9e2 2024-11-07T17:16:40,053 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b308f62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:40,053 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7284f16d to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47679076 2024-11-07T17:16:40,058 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68035c67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:40,059 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x37a637ac to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4cb9e50e 2024-11-07T17:16:40,063 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3eab689a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:16:40,066 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:40,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-07T17:16:40,067 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:40,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-07T17:16:40,068 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:40,068 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:40,069 DEBUG [hconnection-0x31ae54af-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:40,071 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46582, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:40,074 DEBUG [hconnection-0x41cbba61-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:40,076 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46584, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:40,081 DEBUG [hconnection-0x450c0712-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:40,081 DEBUG [hconnection-0x639b509c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:40,081 DEBUG [hconnection-0x491a2234-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:40,082 DEBUG [hconnection-0x204de444-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:40,082 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46598, version=2.7.0-SNAPSHOT, sasl=false, 
ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:40,082 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46600, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:40,082 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46616, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:40,082 DEBUG [hconnection-0x7bf595b1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:40,083 DEBUG [hconnection-0x7b901d91-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:40,083 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46630, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:40,083 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46642, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:40,083 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46646, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:40,084 DEBUG [hconnection-0x3393d3c5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:40,084 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46654, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:40,086 DEBUG [hconnection-0x54d59e25-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:16:40,087 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46662, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:16:40,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:40,110 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T17:16:40,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:40,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:40,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:40,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:40,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:40,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:40,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999860129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999860131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999860131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999860131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999860133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411078b68d5d3360a469f89ca85c765836fec_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999800108/Put/seqid=0 2024-11-07T17:16:40,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742175_1351 (size=14594) 2024-11-07T17:16:40,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-07T17:16:40,219 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T17:16:40,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:40,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:40,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:40,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999860234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999860234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999860234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999860234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999860235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-07T17:16:40,372 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,373 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T17:16:40,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:40,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:40,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:40,373 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999860438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999860438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999860438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999860439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999860439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,525 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,526 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T17:16:40,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:40,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:40,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:40,526 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,558 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:40,562 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411078b68d5d3360a469f89ca85c765836fec_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078b68d5d3360a469f89ca85c765836fec_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:40,563 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/b79380b7eb0d4faa91e5af467393cf5a, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:40,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/b79380b7eb0d4faa91e5af467393cf5a is 175, key is test_row_0/A:col10/1730999800108/Put/seqid=0 2024-11-07T17:16:40,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742176_1352 (size=39549) 2024-11-07T17:16:40,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-07T17:16:40,678 DEBUG [RSProcedureDispatcher-pool-2 {}] 
master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,679 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T17:16:40,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:40,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:40,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:40,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:40,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999860741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999860741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999860742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999860743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:40,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999860743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,831 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T17:16:40,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:40,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:40,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:40,832 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,968 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/b79380b7eb0d4faa91e5af467393cf5a 2024-11-07T17:16:40,984 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:40,985 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T17:16:40,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:40,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:40,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:40,985 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:40,989 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/93a5832c58834a80b2be0203073b1038 is 50, key is test_row_0/B:col10/1730999800108/Put/seqid=0 2024-11-07T17:16:40,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742177_1353 (size=12001) 2024-11-07T17:16:41,137 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:41,138 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T17:16:41,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:41,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:41,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:41,138 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:41,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:41,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:41,142 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T17:16:41,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-07T17:16:41,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:41,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999861246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:41,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:41,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999861248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:41,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:41,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999861248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:41,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:41,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999861248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:41,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:41,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999861249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:41,291 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:41,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T17:16:41,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:41,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:41,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:41,291 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:41,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:41,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:41,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/93a5832c58834a80b2be0203073b1038 2024-11-07T17:16:41,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/771cbbc17a54455994b6107c009f8d39 is 50, key is test_row_0/C:col10/1730999800108/Put/seqid=0 2024-11-07T17:16:41,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742178_1354 (size=12001) 2024-11-07T17:16:41,443 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:41,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T17:16:41,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:41,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
as already flushing 2024-11-07T17:16:41,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:41,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:41,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:41,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:41,596 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:41,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T17:16:41,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:41,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:41,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:41,597 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:41,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:41,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:41,749 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:41,750 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T17:16:41,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:41,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:41,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:41,750 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:41,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:41,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:41,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/771cbbc17a54455994b6107c009f8d39 2024-11-07T17:16:41,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/b79380b7eb0d4faa91e5af467393cf5a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/b79380b7eb0d4faa91e5af467393cf5a 2024-11-07T17:16:41,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/b79380b7eb0d4faa91e5af467393cf5a, entries=200, sequenceid=15, filesize=38.6 K 2024-11-07T17:16:41,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/93a5832c58834a80b2be0203073b1038 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/93a5832c58834a80b2be0203073b1038 2024-11-07T17:16:41,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/93a5832c58834a80b2be0203073b1038, entries=150, sequenceid=15, 
filesize=11.7 K 2024-11-07T17:16:41,840 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/771cbbc17a54455994b6107c009f8d39 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/771cbbc17a54455994b6107c009f8d39 2024-11-07T17:16:41,845 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/771cbbc17a54455994b6107c009f8d39, entries=150, sequenceid=15, filesize=11.7 K 2024-11-07T17:16:41,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 73d52d69bc80a97d9d4aef7a7d44d969 in 1736ms, sequenceid=15, compaction requested=false 2024-11-07T17:16:41,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:41,902 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:41,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T17:16:41,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
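[editor's note, not test output] The "Over memstore limit=512.0 K" warnings in this run come from HRegion.checkResources, which rejects writes once a region's memstore passes its blocking threshold. As a hedged illustration (the property names below are standard HBase configuration keys, but the test's exact settings are not shown in this log), the snippet computes that threshold the way the region server does: flush size times the blocking multiplier. A 512 K limit is consistent with the test lowering the flush size far below the 128 MB default.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Per-region flush threshold (default 128 MB) and blocking multiplier (default 4).
            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
            // checkResources throws RegionTooBusyException once the region's memstore exceeds
            // flushSize * multiplier, producing the "Over memstore limit" warnings in this log.
            System.out.println("Blocking limit (bytes): " + flushSize * multiplier);
        }
    }

Client puts that hit this limit are rejected with the exception recorded in the CallRunner entries and are typically retried by the client until a flush completes and memstore pressure drops.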
2024-11-07T17:16:41,903 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-07T17:16:41,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:41,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:41,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:41,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:41,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:41,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:41,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107472db18b368e49d296059f2ad071f859_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999800129/Put/seqid=0 2024-11-07T17:16:41,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742179_1355 (size=12154) 2024-11-07T17:16:42,149 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-07T17:16:42,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-07T17:16:42,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:42,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:42,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999862260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999862261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999862261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999862263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999862264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:42,318 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107472db18b368e49d296059f2ad071f859_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107472db18b368e49d296059f2ad071f859_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:42,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/2031ffe4c4e74ebbb585c34ab4e7219b, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:42,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/2031ffe4c4e74ebbb585c34ab4e7219b is 175, key is test_row_0/A:col10/1730999800129/Put/seqid=0 2024-11-07T17:16:42,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742180_1356 (size=30955) 2024-11-07T17:16:42,328 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/2031ffe4c4e74ebbb585c34ab4e7219b 2024-11-07T17:16:42,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/e4f10f096ebc4251ac7221b7f4a0c797 is 50, key is test_row_0/B:col10/1730999800129/Put/seqid=0 2024-11-07T17:16:42,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742181_1357 (size=12001) 2024-11-07T17:16:42,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999862366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999862366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999862367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999862369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999862570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999862571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999862571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999862573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,739 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/e4f10f096ebc4251ac7221b7f4a0c797 2024-11-07T17:16:42,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/3d10eea8a81b4653809c036354f3eeae is 50, key is test_row_0/C:col10/1730999800129/Put/seqid=0 2024-11-07T17:16:42,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742182_1358 (size=12001) 2024-11-07T17:16:42,752 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/3d10eea8a81b4653809c036354f3eeae 2024-11-07T17:16:42,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/2031ffe4c4e74ebbb585c34ab4e7219b as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/2031ffe4c4e74ebbb585c34ab4e7219b 2024-11-07T17:16:42,759 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/2031ffe4c4e74ebbb585c34ab4e7219b, entries=150, sequenceid=40, filesize=30.2 K 2024-11-07T17:16:42,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/e4f10f096ebc4251ac7221b7f4a0c797 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/e4f10f096ebc4251ac7221b7f4a0c797 2024-11-07T17:16:42,763 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/e4f10f096ebc4251ac7221b7f4a0c797, entries=150, sequenceid=40, filesize=11.7 K 2024-11-07T17:16:42,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/3d10eea8a81b4653809c036354f3eeae as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3d10eea8a81b4653809c036354f3eeae 2024-11-07T17:16:42,767 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3d10eea8a81b4653809c036354f3eeae, entries=150, sequenceid=40, filesize=11.7 K 2024-11-07T17:16:42,768 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 73d52d69bc80a97d9d4aef7a7d44d969 in 865ms, sequenceid=40, compaction requested=false 2024-11-07T17:16:42,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:42,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:42,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-07T17:16:42,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-07T17:16:42,771 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-07T17:16:42,771 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7010 sec 2024-11-07T17:16:42,772 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 2.7050 sec 2024-11-07T17:16:42,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:42,875 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T17:16:42,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:42,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:42,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:42,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:42,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:42,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:42,884 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107eeac75e517af4864bd0c28a3ec41ca43_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999802875/Put/seqid=0 2024-11-07T17:16:42,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742183_1359 (size=14594) 2024-11-07T17:16:42,892 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:42,896 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107eeac75e517af4864bd0c28a3ec41ca43_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107eeac75e517af4864bd0c28a3ec41ca43_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:42,897 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/775489e744cc4d778fbc7df089f0421a, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:42,897 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/775489e744cc4d778fbc7df089f0421a is 175, key is test_row_0/A:col10/1730999802875/Put/seqid=0 2024-11-07T17:16:42,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742184_1360 (size=39549) 2024-11-07T17:16:42,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999862929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999862929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999862929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:42,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:42,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999862938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:43,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:43,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999863040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:43,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:43,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999863040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:43,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:43,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999863040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:43,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:43,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999863046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:43,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:43,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999863245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:43,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:43,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999863245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:43,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:43,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999863246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:43,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:43,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999863251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:43,302 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/775489e744cc4d778fbc7df089f0421a 2024-11-07T17:16:43,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/da8183934fc042a689f226e86e9d1698 is 50, key is test_row_0/B:col10/1730999802875/Put/seqid=0 2024-11-07T17:16:43,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742185_1361 (size=12001) 2024-11-07T17:16:43,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:43,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999863548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:43,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:43,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999863548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:43,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:43,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999863550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:43,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:43,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999863557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:43,715 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/da8183934fc042a689f226e86e9d1698 2024-11-07T17:16:43,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/1315452bef6c4c659cd591daaaecc05f is 50, key is test_row_0/C:col10/1730999802875/Put/seqid=0 2024-11-07T17:16:43,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742186_1362 (size=12001) 2024-11-07T17:16:44,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:44,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999864055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:44,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:44,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999864055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:44,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:44,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999864057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:44,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:44,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999864060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:44,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/1315452bef6c4c659cd591daaaecc05f 2024-11-07T17:16:44,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/775489e744cc4d778fbc7df089f0421a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/775489e744cc4d778fbc7df089f0421a 2024-11-07T17:16:44,135 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/775489e744cc4d778fbc7df089f0421a, entries=200, sequenceid=53, filesize=38.6 K 2024-11-07T17:16:44,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/da8183934fc042a689f226e86e9d1698 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/da8183934fc042a689f226e86e9d1698 2024-11-07T17:16:44,139 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/da8183934fc042a689f226e86e9d1698, entries=150, sequenceid=53, filesize=11.7 K 2024-11-07T17:16:44,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/1315452bef6c4c659cd591daaaecc05f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/1315452bef6c4c659cd591daaaecc05f 2024-11-07T17:16:44,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/1315452bef6c4c659cd591daaaecc05f, entries=150, sequenceid=53, filesize=11.7 K 2024-11-07T17:16:44,143 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 73d52d69bc80a97d9d4aef7a7d44d969 in 1268ms, sequenceid=53, compaction requested=true 2024-11-07T17:16:44,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:44,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:16:44,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:44,144 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:44,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:44,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:44,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:44,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:44,144 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:44,145 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110053 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:44,145 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/A is initiating minor compaction (all files) 2024-11-07T17:16:44,145 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/A in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
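Note on the RegionTooBusyException entries above: HRegion.checkResources() rejects mutations once a region's memstore passes its blocking size, which is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512.0 K limit here indicates the test runs with a far smaller flush size than the 128 MB default. The exact values used by TestAcidGuarantees are not visible in this log, so the minimal sketch below uses illustrative numbers only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    // Illustrative values only (not read from this log): with these settings a
    // region starts rejecting writes with RegionTooBusyException once its
    // memstore reaches 128 KB * 4 = 512 KB, until MemStoreFlusher flushes it
    // back below the limit, as in the flush entries above.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
  }
}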
2024-11-07T17:16:44,145 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/b79380b7eb0d4faa91e5af467393cf5a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/2031ffe4c4e74ebbb585c34ab4e7219b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/775489e744cc4d778fbc7df089f0421a] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=107.5 K 2024-11-07T17:16:44,145 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:44,145 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/b79380b7eb0d4faa91e5af467393cf5a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/2031ffe4c4e74ebbb585c34ab4e7219b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/775489e744cc4d778fbc7df089f0421a] 2024-11-07T17:16:44,145 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:44,145 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/B is initiating minor compaction (all files) 2024-11-07T17:16:44,146 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/B in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:44,146 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting b79380b7eb0d4faa91e5af467393cf5a, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1730999800086 2024-11-07T17:16:44,146 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/93a5832c58834a80b2be0203073b1038, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/e4f10f096ebc4251ac7221b7f4a0c797, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/da8183934fc042a689f226e86e9d1698] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=35.2 K 2024-11-07T17:16:44,146 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2031ffe4c4e74ebbb585c34ab4e7219b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1730999800129 2024-11-07T17:16:44,146 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 93a5832c58834a80b2be0203073b1038, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1730999800086 2024-11-07T17:16:44,146 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting e4f10f096ebc4251ac7221b7f4a0c797, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1730999800129 2024-11-07T17:16:44,146 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 775489e744cc4d778fbc7df089f0421a, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1730999802258 2024-11-07T17:16:44,147 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting da8183934fc042a689f226e86e9d1698, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1730999802258 2024-11-07T17:16:44,154 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:44,154 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#B#compaction#306 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:44,155 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/2aca49c2889e4bf895115c6be1f892fd is 50, key is test_row_0/B:col10/1730999802875/Put/seqid=0 2024-11-07T17:16:44,156 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024110766127f2995ed4260bc772c1a0aabadf9_73d52d69bc80a97d9d4aef7a7d44d969 store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:44,159 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024110766127f2995ed4260bc772c1a0aabadf9_73d52d69bc80a97d9d4aef7a7d44d969, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:44,159 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110766127f2995ed4260bc772c1a0aabadf9_73d52d69bc80a97d9d4aef7a7d44d969 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:44,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742187_1363 (size=12104) 2024-11-07T17:16:44,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742188_1364 (size=4469) 2024-11-07T17:16:44,167 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#A#compaction#307 average throughput is 1.88 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:44,167 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/9a9b13d983e846c28f0e6c191c238537 is 175, key is test_row_0/A:col10/1730999802875/Put/seqid=0 2024-11-07T17:16:44,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-07T17:16:44,172 INFO [Thread-1592 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-07T17:16:44,173 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/2aca49c2889e4bf895115c6be1f892fd as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2aca49c2889e4bf895115c6be1f892fd 2024-11-07T17:16:44,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742189_1365 (size=31058) 2024-11-07T17:16:44,174 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:44,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-07T17:16:44,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-07T17:16:44,175 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:44,175 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:44,176 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:44,177 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/B of 73d52d69bc80a97d9d4aef7a7d44d969 into 2aca49c2889e4bf895115c6be1f892fd(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
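For context on the FLUSH procedures seen here (procId 106 completed, pid=108 just stored): they are what the master runs when a client requests a table flush through the Admin API. A minimal, self-contained sketch follows; the connection setup is a placeholder and is not taken from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Reads hbase-site.xml from the classpath; placeholder setup, not the test's own.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; in this build the master
      // executes it as a FlushTableProcedure with FlushRegionProcedure children,
      // matching the pid=108 / pid=109 entries in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}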
2024-11-07T17:16:44,177 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:44,177 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/B, priority=13, startTime=1730999804144; duration=0sec 2024-11-07T17:16:44,177 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:44,177 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:B 2024-11-07T17:16:44,178 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:44,178 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:44,178 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/C is initiating minor compaction (all files) 2024-11-07T17:16:44,178 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/C in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:44,178 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/771cbbc17a54455994b6107c009f8d39, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3d10eea8a81b4653809c036354f3eeae, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/1315452bef6c4c659cd591daaaecc05f] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=35.2 K 2024-11-07T17:16:44,179 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 771cbbc17a54455994b6107c009f8d39, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1730999800086 2024-11-07T17:16:44,179 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d10eea8a81b4653809c036354f3eeae, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1730999800129 2024-11-07T17:16:44,179 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 1315452bef6c4c659cd591daaaecc05f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1730999802258 2024-11-07T17:16:44,185 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
73d52d69bc80a97d9d4aef7a7d44d969#C#compaction#308 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:44,185 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/f0dd39b9ee9c4c3b91395dc6b068d5d7 is 50, key is test_row_0/C:col10/1730999802875/Put/seqid=0 2024-11-07T17:16:44,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742190_1366 (size=12104) 2024-11-07T17:16:44,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-07T17:16:44,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:44,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T17:16:44,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:44,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:44,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:44,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:44,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:44,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:44,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411071fca64a58bec419abb65d07b334552a9_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999802915/Put/seqid=0 2024-11-07T17:16:44,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742191_1367 (size=14594) 2024-11-07T17:16:44,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:44,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999864323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:44,327 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:44,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T17:16:44,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:44,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:44,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:44,328 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:44,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:44,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:44,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:44,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999864427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:44,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-07T17:16:44,479 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:44,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T17:16:44,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:44,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:44,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:44,480 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:44,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:44,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:44,576 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/9a9b13d983e846c28f0e6c191c238537 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9a9b13d983e846c28f0e6c191c238537 2024-11-07T17:16:44,580 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/A of 73d52d69bc80a97d9d4aef7a7d44d969 into 9a9b13d983e846c28f0e6c191c238537(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:16:44,580 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:44,580 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/A, priority=13, startTime=1730999804144; duration=0sec 2024-11-07T17:16:44,580 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:44,580 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:A 2024-11-07T17:16:44,593 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/f0dd39b9ee9c4c3b91395dc6b068d5d7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/f0dd39b9ee9c4c3b91395dc6b068d5d7 2024-11-07T17:16:44,597 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/C of 73d52d69bc80a97d9d4aef7a7d44d969 into f0dd39b9ee9c4c3b91395dc6b068d5d7(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:44,597 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:44,597 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/C, priority=13, startTime=1730999804144; duration=0sec 2024-11-07T17:16:44,598 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:44,598 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:C 2024-11-07T17:16:44,632 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:44,632 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T17:16:44,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:44,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
as already flushing 2024-11-07T17:16:44,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:44,632 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:44,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:44,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:44,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:44,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999864632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:44,692 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:44,695 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411071fca64a58bec419abb65d07b334552a9_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411071fca64a58bec419abb65d07b334552a9_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:44,696 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/4e3609af8c1a4e62bcb914d5488eb2b6, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:44,697 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/4e3609af8c1a4e62bcb914d5488eb2b6 is 175, key is test_row_0/A:col10/1730999802915/Put/seqid=0 2024-11-07T17:16:44,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742192_1368 (size=39549) 2024-11-07T17:16:44,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-07T17:16:44,784 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:44,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T17:16:44,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
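The rejected Mutate calls above (callId 33, 35, 37 on connection 172.17.0.2:46582) are ordinary client puts bouncing off the over-limit region; the HBase client retries RegionTooBusyException internally, governed by hbase.client.retries.number and hbase.client.pause, so callers only see a failure once retries are exhausted. A minimal sketch of such a put is below; the row, family, and qualifier are taken from the log keys (test_row_0/A:col10), while the value and connection setup are placeholders.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Placeholder value; while the region is over its memstore limit this call
      // is retried by the client and only fails after the configured retries
      // are exhausted.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put);
    }
  }
}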
2024-11-07T17:16:44,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:44,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:44,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:44,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:44,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:44,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:44,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999864936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:44,937 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:44,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T17:16:44,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:44,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:44,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:44,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:44,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:44,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:45,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:45,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999865060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:45,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:45,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999865067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:45,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:45,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999865069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:45,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:45,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999865071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:45,090 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:45,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T17:16:45,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:45,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:45,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:45,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:45,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:45,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:45,101 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/4e3609af8c1a4e62bcb914d5488eb2b6 2024-11-07T17:16:45,108 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/9d6b3d49f305447fab34b49a3934a360 is 50, key is test_row_0/B:col10/1730999802915/Put/seqid=0 2024-11-07T17:16:45,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742193_1369 (size=12001) 2024-11-07T17:16:45,243 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:45,243 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T17:16:45,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:45,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:45,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:45,244 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:45,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:45,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:45,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-07T17:16:45,396 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:45,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T17:16:45,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:45,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:45,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:45,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:45,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:45,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:45,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:45,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999865441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:45,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/9d6b3d49f305447fab34b49a3934a360 2024-11-07T17:16:45,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/c72e6aa3f470469a88a00fb7c7e48754 is 50, key is test_row_0/C:col10/1730999802915/Put/seqid=0 2024-11-07T17:16:45,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742194_1370 (size=12001) 2024-11-07T17:16:45,527 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/c72e6aa3f470469a88a00fb7c7e48754 2024-11-07T17:16:45,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/4e3609af8c1a4e62bcb914d5488eb2b6 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/4e3609af8c1a4e62bcb914d5488eb2b6 2024-11-07T17:16:45,536 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/4e3609af8c1a4e62bcb914d5488eb2b6, entries=200, sequenceid=78, filesize=38.6 K 2024-11-07T17:16:45,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/9d6b3d49f305447fab34b49a3934a360 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/9d6b3d49f305447fab34b49a3934a360 2024-11-07T17:16:45,541 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/9d6b3d49f305447fab34b49a3934a360, entries=150, sequenceid=78, filesize=11.7 K 2024-11-07T17:16:45,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/c72e6aa3f470469a88a00fb7c7e48754 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c72e6aa3f470469a88a00fb7c7e48754 2024-11-07T17:16:45,546 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c72e6aa3f470469a88a00fb7c7e48754, entries=150, sequenceid=78, filesize=11.7 K 2024-11-07T17:16:45,547 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 73d52d69bc80a97d9d4aef7a7d44d969 in 1269ms, sequenceid=78, compaction requested=false 2024-11-07T17:16:45,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:45,549 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:45,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T17:16:45,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:45,549 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T17:16:45,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:45,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:45,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:45,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:45,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:45,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:45,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411072e3738c32cef45c7a00e1aa5b42d5de5_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999804322/Put/seqid=0 2024-11-07T17:16:45,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742195_1371 (size=12154) 2024-11-07T17:16:45,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:45,978 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411072e3738c32cef45c7a00e1aa5b42d5de5_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072e3738c32cef45c7a00e1aa5b42d5de5_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:45,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/0fdc3e7e06b743e98a58946a25479406, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:45,979 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/0fdc3e7e06b743e98a58946a25479406 is 175, key is test_row_0/A:col10/1730999804322/Put/seqid=0 2024-11-07T17:16:45,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742196_1372 (size=30955) 2024-11-07T17:16:45,984 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/0fdc3e7e06b743e98a58946a25479406 2024-11-07T17:16:45,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/2f6d274634b542ddae9a9c4ef413e99a is 50, key is test_row_0/B:col10/1730999804322/Put/seqid=0 2024-11-07T17:16:45,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742197_1373 (size=12001) 2024-11-07T17:16:46,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-07T17:16:46,398 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/2f6d274634b542ddae9a9c4ef413e99a 2024-11-07T17:16:46,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/0519cadcc0f641f99888447ac74b7db0 is 50, key is test_row_0/C:col10/1730999804322/Put/seqid=0 2024-11-07T17:16:46,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742198_1374 (size=12001) 2024-11-07T17:16:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:46,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:46,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:46,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999866580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:46,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999866686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:46,821 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/0519cadcc0f641f99888447ac74b7db0 2024-11-07T17:16:46,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/0fdc3e7e06b743e98a58946a25479406 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/0fdc3e7e06b743e98a58946a25479406 2024-11-07T17:16:46,830 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/0fdc3e7e06b743e98a58946a25479406, entries=150, sequenceid=92, filesize=30.2 K 2024-11-07T17:16:46,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/2f6d274634b542ddae9a9c4ef413e99a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2f6d274634b542ddae9a9c4ef413e99a 2024-11-07T17:16:46,835 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2f6d274634b542ddae9a9c4ef413e99a, entries=150, sequenceid=92, filesize=11.7 K 2024-11-07T17:16:46,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/0519cadcc0f641f99888447ac74b7db0 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/0519cadcc0f641f99888447ac74b7db0 2024-11-07T17:16:46,840 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/0519cadcc0f641f99888447ac74b7db0, entries=150, sequenceid=92, filesize=11.7 K 2024-11-07T17:16:46,841 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 73d52d69bc80a97d9d4aef7a7d44d969 in 1292ms, sequenceid=92, compaction requested=true 2024-11-07T17:16:46,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:46,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:46,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-07T17:16:46,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-07T17:16:46,844 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-07T17:16:46,844 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6670 sec 2024-11-07T17:16:46,846 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 2.6710 sec 2024-11-07T17:16:46,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:46,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-07T17:16:46,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:46,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:46,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:46,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:46,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:46,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:46,908 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107a20c7162ad05413bace6c131fc93858c_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999806572/Put/seqid=0 2024-11-07T17:16:46,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742199_1375 (size=12154) 2024-11-07T17:16:46,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:46,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999866950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:47,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:47,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999867057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:47,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999867071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:47,075 DEBUG [Thread-1588 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., hostname=3a0fde618c86,37403,1730999712734, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T17:16:47,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:47,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999867077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:47,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:47,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999867077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:47,081 DEBUG [Thread-1590 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., hostname=3a0fde618c86,37403,1730999712734, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T17:16:47,081 DEBUG [Thread-1582 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., hostname=3a0fde618c86,37403,1730999712734, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T17:16:47,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:47,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999867079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:47,083 DEBUG [Thread-1586 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4155 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., hostname=3a0fde618c86,37403,1730999712734, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T17:16:47,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:47,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999867262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:47,318 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:47,322 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107a20c7162ad05413bace6c131fc93858c_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107a20c7162ad05413bace6c131fc93858c_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:47,323 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/c49572f4d1cf4a1cb43661f18bb30ef7, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:47,324 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/c49572f4d1cf4a1cb43661f18bb30ef7 is 175, key is test_row_0/A:col10/1730999806572/Put/seqid=0 2024-11-07T17:16:47,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742200_1376 (size=30955) 2024-11-07T17:16:47,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:47,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999867573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:47,728 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/c49572f4d1cf4a1cb43661f18bb30ef7 2024-11-07T17:16:47,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/f07ae36da3e74665939449e1b65f724f is 50, key is test_row_0/B:col10/1730999806572/Put/seqid=0 2024-11-07T17:16:47,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742201_1377 (size=12001) 2024-11-07T17:16:47,743 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/f07ae36da3e74665939449e1b65f724f 2024-11-07T17:16:47,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/e81ff8f3028e4d259d45314079b58474 is 50, key is test_row_0/C:col10/1730999806572/Put/seqid=0 2024-11-07T17:16:47,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742202_1378 (size=12001) 2024-11-07T17:16:48,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:48,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999868076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:48,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/e81ff8f3028e4d259d45314079b58474 2024-11-07T17:16:48,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/c49572f4d1cf4a1cb43661f18bb30ef7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/c49572f4d1cf4a1cb43661f18bb30ef7 2024-11-07T17:16:48,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/c49572f4d1cf4a1cb43661f18bb30ef7, entries=150, sequenceid=117, filesize=30.2 K 2024-11-07T17:16:48,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/f07ae36da3e74665939449e1b65f724f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/f07ae36da3e74665939449e1b65f724f 2024-11-07T17:16:48,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/f07ae36da3e74665939449e1b65f724f, entries=150, sequenceid=117, filesize=11.7 K 2024-11-07T17:16:48,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/e81ff8f3028e4d259d45314079b58474 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/e81ff8f3028e4d259d45314079b58474 2024-11-07T17:16:48,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/e81ff8f3028e4d259d45314079b58474, entries=150, sequenceid=117, filesize=11.7 K 2024-11-07T17:16:48,173 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 73d52d69bc80a97d9d4aef7a7d44d969 in 1278ms, sequenceid=117, compaction requested=true 2024-11-07T17:16:48,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:48,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:16:48,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:48,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:48,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:48,173 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:16:48,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:48,173 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:16:48,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:48,174 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132517 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:16:48,174 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 
4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:16:48,175 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/B is initiating minor compaction (all files) 2024-11-07T17:16:48,175 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/A is initiating minor compaction (all files) 2024-11-07T17:16:48,175 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/A in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:48,175 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/B in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:48,175 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9a9b13d983e846c28f0e6c191c238537, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/4e3609af8c1a4e62bcb914d5488eb2b6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/0fdc3e7e06b743e98a58946a25479406, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/c49572f4d1cf4a1cb43661f18bb30ef7] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=129.4 K 2024-11-07T17:16:48,175 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2aca49c2889e4bf895115c6be1f892fd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/9d6b3d49f305447fab34b49a3934a360, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2f6d274634b542ddae9a9c4ef413e99a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/f07ae36da3e74665939449e1b65f724f] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=47.0 K 2024-11-07T17:16:48,175 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:48,175 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9a9b13d983e846c28f0e6c191c238537, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/4e3609af8c1a4e62bcb914d5488eb2b6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/0fdc3e7e06b743e98a58946a25479406, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/c49572f4d1cf4a1cb43661f18bb30ef7] 2024-11-07T17:16:48,175 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 2aca49c2889e4bf895115c6be1f892fd, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1730999802258 2024-11-07T17:16:48,175 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a9b13d983e846c28f0e6c191c238537, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1730999802258 2024-11-07T17:16:48,175 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d6b3d49f305447fab34b49a3934a360, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1730999802899 2024-11-07T17:16:48,176 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e3609af8c1a4e62bcb914d5488eb2b6, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1730999802899 2024-11-07T17:16:48,176 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f6d274634b542ddae9a9c4ef413e99a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1730999804297 2024-11-07T17:16:48,176 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fdc3e7e06b743e98a58946a25479406, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1730999804297 2024-11-07T17:16:48,176 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting f07ae36da3e74665939449e1b65f724f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730999806563 2024-11-07T17:16:48,176 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting c49572f4d1cf4a1cb43661f18bb30ef7, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730999806563 2024-11-07T17:16:48,182 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:48,184 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
73d52d69bc80a97d9d4aef7a7d44d969#B#compaction#319 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:48,184 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/6b7e8dc5ff1b49cab412a3fac1e21d93 is 50, key is test_row_0/B:col10/1730999806572/Put/seqid=0 2024-11-07T17:16:48,185 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107b56ba9e0bb764a3c9967779674a029b8_73d52d69bc80a97d9d4aef7a7d44d969 store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:48,187 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107b56ba9e0bb764a3c9967779674a029b8_73d52d69bc80a97d9d4aef7a7d44d969, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:48,187 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107b56ba9e0bb764a3c9967779674a029b8_73d52d69bc80a97d9d4aef7a7d44d969 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:48,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742203_1379 (size=12241) 2024-11-07T17:16:48,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742204_1380 (size=4469) 2024-11-07T17:16:48,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-07T17:16:48,279 INFO [Thread-1592 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-07T17:16:48,281 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:48,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-07T17:16:48,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-07T17:16:48,282 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:48,283 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-07T17:16:48,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:48,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-07T17:16:48,434 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:48,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-07T17:16:48,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:48,435 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T17:16:48,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:48,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:48,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:48,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:48,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:48,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:48,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411072b70b05f6dbd49b0b704a25a291b0084_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999806949/Put/seqid=0 2024-11-07T17:16:48,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742205_1381 (size=12154) 2024-11-07T17:16:48,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-07T17:16:48,595 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/6b7e8dc5ff1b49cab412a3fac1e21d93 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/6b7e8dc5ff1b49cab412a3fac1e21d93 2024-11-07T17:16:48,598 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#A#compaction#318 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:48,599 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/4335ebf28e1248228c79a63dc8a33aa6 is 175, key is test_row_0/A:col10/1730999806572/Put/seqid=0 2024-11-07T17:16:48,600 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/B of 73d52d69bc80a97d9d4aef7a7d44d969 into 6b7e8dc5ff1b49cab412a3fac1e21d93(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:48,600 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:48,600 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/B, priority=12, startTime=1730999808173; duration=0sec 2024-11-07T17:16:48,600 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:48,601 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:B 2024-11-07T17:16:48,601 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:16:48,602 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:16:48,602 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/C is initiating minor compaction (all files) 2024-11-07T17:16:48,602 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/C in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:48,602 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/f0dd39b9ee9c4c3b91395dc6b068d5d7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c72e6aa3f470469a88a00fb7c7e48754, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/0519cadcc0f641f99888447ac74b7db0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/e81ff8f3028e4d259d45314079b58474] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=47.0 K 2024-11-07T17:16:48,602 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting f0dd39b9ee9c4c3b91395dc6b068d5d7, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1730999802258 2024-11-07T17:16:48,603 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting c72e6aa3f470469a88a00fb7c7e48754, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1730999802899 2024-11-07T17:16:48,603 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 0519cadcc0f641f99888447ac74b7db0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1730999804297 2024-11-07T17:16:48,603 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting e81ff8f3028e4d259d45314079b58474, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730999806563 2024-11-07T17:16:48,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742206_1382 (size=31195) 2024-11-07T17:16:48,612 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#C#compaction#321 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:48,613 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/ca32ee4e847a45f8a0ac29675c9e47aa is 50, key is test_row_0/C:col10/1730999806572/Put/seqid=0 2024-11-07T17:16:48,615 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/4335ebf28e1248228c79a63dc8a33aa6 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/4335ebf28e1248228c79a63dc8a33aa6 2024-11-07T17:16:48,620 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/A of 73d52d69bc80a97d9d4aef7a7d44d969 into 4335ebf28e1248228c79a63dc8a33aa6(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:48,621 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:48,621 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/A, priority=12, startTime=1730999808173; duration=0sec 2024-11-07T17:16:48,621 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:48,621 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:A 2024-11-07T17:16:48,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742207_1383 (size=12241) 2024-11-07T17:16:48,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:48,852 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411072b70b05f6dbd49b0b704a25a291b0084_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072b70b05f6dbd49b0b704a25a291b0084_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:48,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/94b547484cf54f2fb880015b433f18ce, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:48,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/94b547484cf54f2fb880015b433f18ce is 175, key is test_row_0/A:col10/1730999806949/Put/seqid=0 2024-11-07T17:16:48,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742208_1384 (size=30955) 2024-11-07T17:16:48,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-07T17:16:49,030 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/ca32ee4e847a45f8a0ac29675c9e47aa as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/ca32ee4e847a45f8a0ac29675c9e47aa 2024-11-07T17:16:49,035 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/C of 73d52d69bc80a97d9d4aef7a7d44d969 into ca32ee4e847a45f8a0ac29675c9e47aa(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:49,035 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:49,035 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/C, priority=12, startTime=1730999808173; duration=0sec 2024-11-07T17:16:49,035 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:49,035 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:C 2024-11-07T17:16:49,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:49,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:49,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:49,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999869196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:49,259 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=128, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/94b547484cf54f2fb880015b433f18ce 2024-11-07T17:16:49,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/d51ac716ebf84c4ba75ce677d214b6ff is 50, key is test_row_0/B:col10/1730999806949/Put/seqid=0 2024-11-07T17:16:49,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742209_1385 (size=12001) 2024-11-07T17:16:49,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:49,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999869302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:49,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-07T17:16:49,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:49,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999869505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:49,672 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/d51ac716ebf84c4ba75ce677d214b6ff 2024-11-07T17:16:49,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/d1af0a67a08f4b9d81261f556950a386 is 50, key is test_row_0/C:col10/1730999806949/Put/seqid=0 2024-11-07T17:16:49,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742210_1386 (size=12001) 2024-11-07T17:16:49,815 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:49,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999869811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:50,083 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/d1af0a67a08f4b9d81261f556950a386 2024-11-07T17:16:50,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/94b547484cf54f2fb880015b433f18ce as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/94b547484cf54f2fb880015b433f18ce 2024-11-07T17:16:50,091 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/94b547484cf54f2fb880015b433f18ce, entries=150, sequenceid=128, filesize=30.2 K 2024-11-07T17:16:50,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/d51ac716ebf84c4ba75ce677d214b6ff as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/d51ac716ebf84c4ba75ce677d214b6ff 2024-11-07T17:16:50,096 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/d51ac716ebf84c4ba75ce677d214b6ff, entries=150, sequenceid=128, filesize=11.7 K 2024-11-07T17:16:50,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/d1af0a67a08f4b9d81261f556950a386 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/d1af0a67a08f4b9d81261f556950a386 2024-11-07T17:16:50,100 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/d1af0a67a08f4b9d81261f556950a386, entries=150, sequenceid=128, filesize=11.7 K 2024-11-07T17:16:50,101 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 73d52d69bc80a97d9d4aef7a7d44d969 in 1666ms, sequenceid=128, compaction requested=false 2024-11-07T17:16:50,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:50,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:50,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-07T17:16:50,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-07T17:16:50,103 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-07T17:16:50,104 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8190 sec 2024-11-07T17:16:50,105 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.8230 sec 2024-11-07T17:16:50,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:50,323 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-07T17:16:50,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:50,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:50,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:50,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:50,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:50,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:50,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110707a4ad020d874ceb8dc7e5318a6f809d_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999809162/Put/seqid=0 2024-11-07T17:16:50,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742211_1387 (size=12304) 2024-11-07T17:16:50,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:50,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999870362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:50,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-07T17:16:50,386 INFO [Thread-1592 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-07T17:16:50,387 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:50,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-07T17:16:50,388 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:50,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-07T17:16:50,389 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:50,389 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:50,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:50,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999870467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:50,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-07T17:16:50,541 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:50,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-07T17:16:50,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:50,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:50,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:50,541 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:50,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:50,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:50,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:50,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999870669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:50,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-07T17:16:50,693 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:50,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-07T17:16:50,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:50,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:50,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:50,694 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:50,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:50,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:50,739 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:50,743 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110707a4ad020d874ceb8dc7e5318a6f809d_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110707a4ad020d874ceb8dc7e5318a6f809d_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:50,744 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/1b744b917c8248b893558645efc8ef50, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:50,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/1b744b917c8248b893558645efc8ef50 is 175, key is test_row_0/A:col10/1730999809162/Put/seqid=0 2024-11-07T17:16:50,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742212_1388 (size=31105) 2024-11-07T17:16:50,749 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/1b744b917c8248b893558645efc8ef50 2024-11-07T17:16:50,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/a1b3f0017fb848b18027dd946c3e0204 is 50, key is test_row_0/B:col10/1730999809162/Put/seqid=0 2024-11-07T17:16:50,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742213_1389 (size=12151) 2024-11-07T17:16:50,845 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:50,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-07T17:16:50,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:50,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:50,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:50,846 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:50,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:50,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:50,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:50,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999870974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:50,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-07T17:16:50,998 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:50,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-07T17:16:50,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:50,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:50,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:50,998 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
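[Annotation] The client.RpcRetryingCallerImpl records that follow ("tries=7, retries=16, started=8163 ms ago") show the writer threads backing off and retrying each put while the region keeps rejecting writes. A minimal client-side sketch is below; the retry and pause values are illustrative assumptions rather than the configuration this test actually uses.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: tuning client retries around a put that may hit a busy region.
    // Values are illustrative assumptions.
    public class RetryOnBusyRegionSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 16); // matches retries=16 in these records
        conf.setLong("hbase.client.pause", 100);        // base pause (ms) between retries
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          try {
            // RpcRetryingCallerImpl retries internally, as in the "tries=7, retries=16" records.
            table.put(put);
          } catch (IOException e) {
            // On exhaustion the client surfaces a retries-exhausted exception whose
            // cause chain carries the server's RegionTooBusyException.
          }
        }
      }
    }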
2024-11-07T17:16:50,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:50,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:51,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:51,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46616 deadline: 1730999871089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:51,091 DEBUG [Thread-1588 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8163 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., hostname=3a0fde618c86,37403,1730999712734, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T17:16:51,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:51,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46646 deadline: 1730999871102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:51,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:51,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999871102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:51,104 DEBUG [Thread-1586 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8176 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., hostname=3a0fde618c86,37403,1730999712734, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T17:16:51,104 DEBUG [Thread-1590 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8175 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., hostname=3a0fde618c86,37403,1730999712734, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T17:16:51,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:51,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999871107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:51,109 DEBUG [Thread-1582 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., hostname=3a0fde618c86,37403,1730999712734, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T17:16:51,150 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:51,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-07T17:16:51,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:51,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:51,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:51,151 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:51,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:51,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:51,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/a1b3f0017fb848b18027dd946c3e0204 2024-11-07T17:16:51,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/50f55d1b5f374ec488f9e301f1197fcc is 50, key is test_row_0/C:col10/1730999809162/Put/seqid=0 2024-11-07T17:16:51,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742214_1390 (size=12151) 2024-11-07T17:16:51,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/50f55d1b5f374ec488f9e301f1197fcc 2024-11-07T17:16:51,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/1b744b917c8248b893558645efc8ef50 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/1b744b917c8248b893558645efc8ef50 2024-11-07T17:16:51,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/1b744b917c8248b893558645efc8ef50, entries=150, sequenceid=157, filesize=30.4 K 2024-11-07T17:16:51,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/a1b3f0017fb848b18027dd946c3e0204 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/a1b3f0017fb848b18027dd946c3e0204 2024-11-07T17:16:51,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/a1b3f0017fb848b18027dd946c3e0204, entries=150, sequenceid=157, filesize=11.9 K 2024-11-07T17:16:51,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/50f55d1b5f374ec488f9e301f1197fcc as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/50f55d1b5f374ec488f9e301f1197fcc 2024-11-07T17:16:51,200 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/50f55d1b5f374ec488f9e301f1197fcc, entries=150, sequenceid=157, filesize=11.9 K 2024-11-07T17:16:51,201 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 73d52d69bc80a97d9d4aef7a7d44d969 in 879ms, sequenceid=157, compaction requested=true 2024-11-07T17:16:51,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:51,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:16:51,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:51,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:51,201 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:51,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:51,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:51,201 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:51,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:51,202 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93255 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:51,202 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:51,202 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/A is initiating minor compaction (all files) 2024-11-07T17:16:51,202 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/B is initiating minor compaction (all files) 2024-11-07T17:16:51,202 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/A in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:51,202 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/B in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:51,202 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/6b7e8dc5ff1b49cab412a3fac1e21d93, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/d51ac716ebf84c4ba75ce677d214b6ff, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/a1b3f0017fb848b18027dd946c3e0204] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=35.5 K 2024-11-07T17:16:51,202 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/4335ebf28e1248228c79a63dc8a33aa6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/94b547484cf54f2fb880015b433f18ce, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/1b744b917c8248b893558645efc8ef50] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=91.1 K 2024-11-07T17:16:51,202 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:51,202 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/4335ebf28e1248228c79a63dc8a33aa6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/94b547484cf54f2fb880015b433f18ce, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/1b744b917c8248b893558645efc8ef50] 2024-11-07T17:16:51,203 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b7e8dc5ff1b49cab412a3fac1e21d93, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730999806563 2024-11-07T17:16:51,203 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4335ebf28e1248228c79a63dc8a33aa6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730999806563 2024-11-07T17:16:51,203 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting d51ac716ebf84c4ba75ce677d214b6ff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1730999806913 2024-11-07T17:16:51,203 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94b547484cf54f2fb880015b433f18ce, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1730999806913 2024-11-07T17:16:51,203 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting a1b3f0017fb848b18027dd946c3e0204, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1730999809162 2024-11-07T17:16:51,203 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b744b917c8248b893558645efc8ef50, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1730999809162 2024-11-07T17:16:51,209 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:51,211 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#B#compaction#327 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:51,211 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/dc147662f6ab4e809cc3719fbc70d4ef is 50, key is test_row_0/B:col10/1730999809162/Put/seqid=0 2024-11-07T17:16:51,212 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107198450319e8c4232979b32bc8de3d96c_73d52d69bc80a97d9d4aef7a7d44d969 store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:51,214 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107198450319e8c4232979b32bc8de3d96c_73d52d69bc80a97d9d4aef7a7d44d969, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:51,214 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107198450319e8c4232979b32bc8de3d96c_73d52d69bc80a97d9d4aef7a7d44d969 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:51,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742215_1391 (size=12493) 2024-11-07T17:16:51,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742216_1392 (size=4469) 2024-11-07T17:16:51,303 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:51,304 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-07T17:16:51,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:51,304 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-07T17:16:51,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:51,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:51,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:51,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:51,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:51,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:51,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107fa529185c784427db29789bc258cf4a6_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999810354/Put/seqid=0 2024-11-07T17:16:51,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742217_1393 (size=12304) 2024-11-07T17:16:51,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:51,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:51,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-07T17:16:51,618 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#A#compaction#328 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:51,619 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/17ea650777b34428a3754817f565a45e is 175, key is test_row_0/A:col10/1730999809162/Put/seqid=0 2024-11-07T17:16:51,622 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/dc147662f6ab4e809cc3719fbc70d4ef as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/dc147662f6ab4e809cc3719fbc70d4ef 2024-11-07T17:16:51,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742218_1394 (size=31447) 2024-11-07T17:16:51,628 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/B of 73d52d69bc80a97d9d4aef7a7d44d969 into dc147662f6ab4e809cc3719fbc70d4ef(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:51,628 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/17ea650777b34428a3754817f565a45e as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/17ea650777b34428a3754817f565a45e 2024-11-07T17:16:51,628 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:51,628 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/B, priority=13, startTime=1730999811201; duration=0sec 2024-11-07T17:16:51,628 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:51,628 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:B 2024-11-07T17:16:51,628 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:51,629 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:51,629 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/C is initiating minor compaction (all files) 2024-11-07T17:16:51,629 INFO 
[RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/C in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:51,629 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/ca32ee4e847a45f8a0ac29675c9e47aa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/d1af0a67a08f4b9d81261f556950a386, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/50f55d1b5f374ec488f9e301f1197fcc] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=35.5 K 2024-11-07T17:16:51,630 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting ca32ee4e847a45f8a0ac29675c9e47aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730999806563 2024-11-07T17:16:51,630 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting d1af0a67a08f4b9d81261f556950a386, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1730999806913 2024-11-07T17:16:51,630 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 50f55d1b5f374ec488f9e301f1197fcc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1730999809162 2024-11-07T17:16:51,632 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/A of 73d52d69bc80a97d9d4aef7a7d44d969 into 17ea650777b34428a3754817f565a45e(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:51,632 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:51,632 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/A, priority=13, startTime=1730999811201; duration=0sec 2024-11-07T17:16:51,633 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:51,633 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:A 2024-11-07T17:16:51,639 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#C#compaction#330 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:51,639 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/4c16942abe404c0ea89a6b5dbe8832be is 50, key is test_row_0/C:col10/1730999809162/Put/seqid=0 2024-11-07T17:16:51,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742219_1395 (size=12493) 2024-11-07T17:16:51,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:51,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999871640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:51,648 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/4c16942abe404c0ea89a6b5dbe8832be as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/4c16942abe404c0ea89a6b5dbe8832be 2024-11-07T17:16:51,652 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/C of 73d52d69bc80a97d9d4aef7a7d44d969 into 4c16942abe404c0ea89a6b5dbe8832be(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:16:51,652 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:51,652 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/C, priority=13, startTime=1730999811201; duration=0sec 2024-11-07T17:16:51,652 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:51,652 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:C 2024-11-07T17:16:51,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:51,718 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107fa529185c784427db29789bc258cf4a6_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fa529185c784427db29789bc258cf4a6_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:51,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/cbe0725fffd0484ea453736bd158b1fd, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:51,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/cbe0725fffd0484ea453736bd158b1fd is 175, key is test_row_0/A:col10/1730999810354/Put/seqid=0 2024-11-07T17:16:51,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742220_1396 (size=31105) 2024-11-07T17:16:51,724 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=167, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/cbe0725fffd0484ea453736bd158b1fd 2024-11-07T17:16:51,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/413cdb9cf695419999dde4908aff8be8 is 50, key is test_row_0/B:col10/1730999810354/Put/seqid=0 2024-11-07T17:16:51,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742221_1397 (size=12151) 2024-11-07T17:16:51,734 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/413cdb9cf695419999dde4908aff8be8 2024-11-07T17:16:51,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/c1d97eb430b6407ab7cc924234c29510 is 50, key is test_row_0/C:col10/1730999810354/Put/seqid=0 2024-11-07T17:16:51,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742222_1398 (size=12151) 2024-11-07T17:16:51,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:51,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999871747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:51,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:51,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999871951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:52,145 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/c1d97eb430b6407ab7cc924234c29510 2024-11-07T17:16:52,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/cbe0725fffd0484ea453736bd158b1fd as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/cbe0725fffd0484ea453736bd158b1fd 2024-11-07T17:16:52,154 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/cbe0725fffd0484ea453736bd158b1fd, entries=150, sequenceid=167, filesize=30.4 K 2024-11-07T17:16:52,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/413cdb9cf695419999dde4908aff8be8 as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/413cdb9cf695419999dde4908aff8be8 2024-11-07T17:16:52,160 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/413cdb9cf695419999dde4908aff8be8, entries=150, sequenceid=167, filesize=11.9 K 2024-11-07T17:16:52,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/c1d97eb430b6407ab7cc924234c29510 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c1d97eb430b6407ab7cc924234c29510 2024-11-07T17:16:52,164 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c1d97eb430b6407ab7cc924234c29510, entries=150, sequenceid=167, filesize=11.9 K 2024-11-07T17:16:52,166 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 73d52d69bc80a97d9d4aef7a7d44d969 in 862ms, sequenceid=167, compaction requested=false 2024-11-07T17:16:52,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:52,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:52,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-07T17:16:52,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-07T17:16:52,169 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-07T17:16:52,169 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7780 sec 2024-11-07T17:16:52,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.7830 sec 2024-11-07T17:16:52,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:52,257 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-07T17:16:52,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:52,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:52,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:52,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:52,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:52,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:52,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107ed4cd1c770f64dd0b41dbbe52cb4dd50_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999812255/Put/seqid=0 2024-11-07T17:16:52,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742223_1399 (size=14794) 2024-11-07T17:16:52,268 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:52,271 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107ed4cd1c770f64dd0b41dbbe52cb4dd50_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ed4cd1c770f64dd0b41dbbe52cb4dd50_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:52,272 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/195e7e53a9a64b6287a0f5d4c0044549, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:52,272 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/195e7e53a9a64b6287a0f5d4c0044549 is 175, key is test_row_0/A:col10/1730999812255/Put/seqid=0 2024-11-07T17:16:52,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742224_1400 (size=39749) 2024-11-07T17:16:52,277 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/195e7e53a9a64b6287a0f5d4c0044549 2024-11-07T17:16:52,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/8d952034f9fa4e3b98808ad051ea3c4a is 50, key is test_row_0/B:col10/1730999812255/Put/seqid=0 2024-11-07T17:16:52,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742225_1401 (size=12151) 2024-11-07T17:16:52,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/8d952034f9fa4e3b98808ad051ea3c4a 2024-11-07T17:16:52,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/c8a8c129966b42589b68e68b980dea5b is 50, key is test_row_0/C:col10/1730999812255/Put/seqid=0 2024-11-07T17:16:52,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:52,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999872295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:52,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742226_1402 (size=12151) 2024-11-07T17:16:52,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999872402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:52,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-07T17:16:52,501 INFO [Thread-1592 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-07T17:16:52,502 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:52,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-07T17:16:52,504 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:52,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-07T17:16:52,505 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:52,505 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:52,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-07T17:16:52,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:52,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999872609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:52,656 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:52,657 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-07T17:16:52,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:52,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:52,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:52,657 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:52,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:52,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:52,708 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/c8a8c129966b42589b68e68b980dea5b 2024-11-07T17:16:52,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/195e7e53a9a64b6287a0f5d4c0044549 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/195e7e53a9a64b6287a0f5d4c0044549 2024-11-07T17:16:52,722 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/195e7e53a9a64b6287a0f5d4c0044549, entries=200, sequenceid=197, filesize=38.8 K 2024-11-07T17:16:52,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/8d952034f9fa4e3b98808ad051ea3c4a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/8d952034f9fa4e3b98808ad051ea3c4a 2024-11-07T17:16:52,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/8d952034f9fa4e3b98808ad051ea3c4a, entries=150, sequenceid=197, filesize=11.9 K 2024-11-07T17:16:52,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/c8a8c129966b42589b68e68b980dea5b as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c8a8c129966b42589b68e68b980dea5b 2024-11-07T17:16:52,733 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c8a8c129966b42589b68e68b980dea5b, entries=150, sequenceid=197, filesize=11.9 K 2024-11-07T17:16:52,734 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 73d52d69bc80a97d9d4aef7a7d44d969 in 477ms, sequenceid=197, compaction requested=true 2024-11-07T17:16:52,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:52,734 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:52,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:16:52,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:52,735 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:52,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:52,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:52,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:52,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:52,736 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102301 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:52,736 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/A is initiating minor compaction (all files) 2024-11-07T17:16:52,736 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/A in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:52,736 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/17ea650777b34428a3754817f565a45e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/cbe0725fffd0484ea453736bd158b1fd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/195e7e53a9a64b6287a0f5d4c0044549] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=99.9 K 2024-11-07T17:16:52,736 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:52,736 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/17ea650777b34428a3754817f565a45e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/cbe0725fffd0484ea453736bd158b1fd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/195e7e53a9a64b6287a0f5d4c0044549] 2024-11-07T17:16:52,737 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:52,737 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/B is initiating minor compaction (all files) 2024-11-07T17:16:52,737 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/B in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:52,737 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/dc147662f6ab4e809cc3719fbc70d4ef, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/413cdb9cf695419999dde4908aff8be8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/8d952034f9fa4e3b98808ad051ea3c4a] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=35.9 K 2024-11-07T17:16:52,738 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17ea650777b34428a3754817f565a45e, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1730999809162 2024-11-07T17:16:52,738 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting dc147662f6ab4e809cc3719fbc70d4ef, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1730999809162 2024-11-07T17:16:52,738 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 413cdb9cf695419999dde4908aff8be8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1730999810343 2024-11-07T17:16:52,738 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbe0725fffd0484ea453736bd158b1fd, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1730999810343 2024-11-07T17:16:52,739 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d952034f9fa4e3b98808ad051ea3c4a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1730999811526 2024-11-07T17:16:52,739 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 195e7e53a9a64b6287a0f5d4c0044549, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1730999811526 2024-11-07T17:16:52,763 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#B#compaction#336 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:52,763 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/46326101af214751b036dadd8bd0d9ac is 50, key is test_row_0/B:col10/1730999812255/Put/seqid=0 2024-11-07T17:16:52,779 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:52,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-07T17:16:52,809 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:52,810 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-07T17:16:52,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:52,810 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-07T17:16:52,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:52,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:52,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:52,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:52,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:52,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:52,818 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107af63db7dd4264e39862d85ef3e4a36b8_73d52d69bc80a97d9d4aef7a7d44d969 store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:52,820 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 
mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107af63db7dd4264e39862d85ef3e4a36b8_73d52d69bc80a97d9d4aef7a7d44d969, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:52,820 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107af63db7dd4264e39862d85ef3e4a36b8_73d52d69bc80a97d9d4aef7a7d44d969 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:52,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742227_1403 (size=12595) 2024-11-07T17:16:52,826 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/46326101af214751b036dadd8bd0d9ac as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/46326101af214751b036dadd8bd0d9ac 2024-11-07T17:16:52,832 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/B of 73d52d69bc80a97d9d4aef7a7d44d969 into 46326101af214751b036dadd8bd0d9ac(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:52,832 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:52,832 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/B, priority=13, startTime=1730999812735; duration=0sec 2024-11-07T17:16:52,832 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:52,832 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:B 2024-11-07T17:16:52,832 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:52,833 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:52,833 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/C is initiating minor compaction (all files) 2024-11-07T17:16:52,833 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/C in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:52,833 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/4c16942abe404c0ea89a6b5dbe8832be, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c1d97eb430b6407ab7cc924234c29510, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c8a8c129966b42589b68e68b980dea5b] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=35.9 K 2024-11-07T17:16:52,834 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c16942abe404c0ea89a6b5dbe8832be, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1730999809162 2024-11-07T17:16:52,834 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting c1d97eb430b6407ab7cc924234c29510, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1730999810343 2024-11-07T17:16:52,835 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting c8a8c129966b42589b68e68b980dea5b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1730999811526 2024-11-07T17:16:52,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411079bdc6461900f4f959f51c589046b43da_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999812294/Put/seqid=0 2024-11-07T17:16:52,877 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#C#compaction#339 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:52,878 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/3bf941197a4f494b910fe70af537b346 is 50, key is test_row_0/C:col10/1730999812255/Put/seqid=0 2024-11-07T17:16:52,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742228_1404 (size=4469) 2024-11-07T17:16:52,922 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#A#compaction#337 average throughput is 0.17 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:52,923 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/2d75de595fd44740948445bd916471ff is 175, key is test_row_0/A:col10/1730999812255/Put/seqid=0 2024-11-07T17:16:52,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:52,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:52,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742230_1406 (size=12595) 2024-11-07T17:16:52,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742231_1407 (size=31549) 2024-11-07T17:16:52,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742229_1405 (size=12304) 2024-11-07T17:16:52,966 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/2d75de595fd44740948445bd916471ff as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/2d75de595fd44740948445bd916471ff 2024-11-07T17:16:52,970 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/3bf941197a4f494b910fe70af537b346 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3bf941197a4f494b910fe70af537b346 2024-11-07T17:16:52,973 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/A of 73d52d69bc80a97d9d4aef7a7d44d969 into 2d75de595fd44740948445bd916471ff(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:16:52,973 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:52,973 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/A, priority=13, startTime=1730999812734; duration=0sec 2024-11-07T17:16:52,973 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:52,973 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:A 2024-11-07T17:16:52,975 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/C of 73d52d69bc80a97d9d4aef7a7d44d969 into 3bf941197a4f494b910fe70af537b346(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:52,975 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:52,975 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/C, priority=13, startTime=1730999812735; duration=0sec 2024-11-07T17:16:52,975 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:52,975 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:C 2024-11-07T17:16:53,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-07T17:16:53,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:53,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999873169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:53,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:53,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999873283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:53,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:53,390 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411079bdc6461900f4f959f51c589046b43da_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411079bdc6461900f4f959f51c589046b43da_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:53,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/9cc37c8a37c44aafbc9bf506740259f1, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:53,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/9cc37c8a37c44aafbc9bf506740259f1 is 175, key is test_row_0/A:col10/1730999812294/Put/seqid=0 2024-11-07T17:16:53,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742232_1408 (size=31105) 2024-11-07T17:16:53,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:53,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999873495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:53,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-07T17:16:53,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:53,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999873804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:53,880 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=206, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/9cc37c8a37c44aafbc9bf506740259f1 2024-11-07T17:16:53,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/bee6d76922b1418e86d2e34da0b7e512 is 50, key is test_row_0/B:col10/1730999812294/Put/seqid=0 2024-11-07T17:16:53,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742233_1409 (size=12151) 2024-11-07T17:16:53,994 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/bee6d76922b1418e86d2e34da0b7e512 2024-11-07T17:16:54,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/398218e6a22e4faab9e861bdad770f47 is 50, key is test_row_0/C:col10/1730999812294/Put/seqid=0 2024-11-07T17:16:54,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742234_1410 (size=12151) 2024-11-07T17:16:54,041 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/398218e6a22e4faab9e861bdad770f47 2024-11-07T17:16:54,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/9cc37c8a37c44aafbc9bf506740259f1 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9cc37c8a37c44aafbc9bf506740259f1 2024-11-07T17:16:54,055 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9cc37c8a37c44aafbc9bf506740259f1, entries=150, sequenceid=206, filesize=30.4 K 2024-11-07T17:16:54,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/bee6d76922b1418e86d2e34da0b7e512 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/bee6d76922b1418e86d2e34da0b7e512 2024-11-07T17:16:54,063 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/bee6d76922b1418e86d2e34da0b7e512, entries=150, sequenceid=206, filesize=11.9 K 2024-11-07T17:16:54,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/398218e6a22e4faab9e861bdad770f47 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/398218e6a22e4faab9e861bdad770f47 2024-11-07T17:16:54,072 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/398218e6a22e4faab9e861bdad770f47, entries=150, sequenceid=206, filesize=11.9 K 2024-11-07T17:16:54,073 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 73d52d69bc80a97d9d4aef7a7d44d969 in 1263ms, sequenceid=206, compaction requested=false 2024-11-07T17:16:54,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:54,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
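The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") come from HRegion.checkResources blocking writes once the region's memstore exceeds its blocking limit, which in a stock HBase 2.x deployment is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (the 512.0 K figure here reflects the deliberately tiny values used by this test, not defaults). A minimal sketch of setting those two keys on a Configuration, assuming standard 2.x configuration names; the class name MemstoreLimitConfig is made up for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfig {
      public static void main(String[] args) {
        // Start from the usual hbase-default.xml / hbase-site.xml stack.
        Configuration conf = HBaseConfiguration.create();
        // Flush a store once its memstore reaches this size (default 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Block new updates once the region memstore reaches flush.size * multiplier;
        // that is the point at which checkResources throws RegionTooBusyException.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking limit (bytes) = " + blockingLimit);
      }
    }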
2024-11-07T17:16:54,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-07T17:16:54,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-07T17:16:54,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-07T17:16:54,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5690 sec 2024-11-07T17:16:54,091 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.5800 sec 2024-11-07T17:16:54,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:54,331 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-07T17:16:54,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:54,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:54,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:54,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:54,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:54,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:54,340 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411072758d47d33474869b730915b87b883ae_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999814329/Put/seqid=0 2024-11-07T17:16:54,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:54,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999874376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:54,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742235_1411 (size=14794) 2024-11-07T17:16:54,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:54,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999874491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:54,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-07T17:16:54,618 INFO [Thread-1592 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-07T17:16:54,619 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:54,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-07T17:16:54,621 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:54,622 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:54,622 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:54,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-07T17:16:54,709 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:54,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999874703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:54,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-07T17:16:54,774 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:54,774 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-07T17:16:54,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:54,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:54,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:54,775 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
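The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" entry above, the stored FlushTableProcedure (pid=116), and the recurring "Checking to see if procedure is done" lines are the server-side view of a client flush request. A minimal client-side sketch of issuing that flush, assuming a standard HBase 2.x client; the class name FlushTable is invented for illustration, the table name is taken from the log:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a flush-table procedure on the master; the client then polls the
          // master until the procedure finishes, which is what produces the repeated
          // "Checking to see if procedure is done pid=..." DEBUG lines in this log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }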
2024-11-07T17:16:54,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:54,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:54,811 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:54,815 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411072758d47d33474869b730915b87b883ae_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072758d47d33474869b730915b87b883ae_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:54,816 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/130c19de8ecf4308ab65c13bcc1359ab, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:54,817 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/130c19de8ecf4308ab65c13bcc1359ab is 175, key is test_row_0/A:col10/1730999814329/Put/seqid=0 2024-11-07T17:16:54,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742236_1412 (size=39749) 2024-11-07T17:16:54,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-07T17:16:54,928 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:54,928 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-07T17:16:54,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:54,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:54,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:54,929 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:54,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:54,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:55,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:55,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999875012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:55,081 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:55,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-07T17:16:55,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:55,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:55,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:55,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
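For context on the client side of the RegionTooBusyException entries above: the normal HBase client already retries such calls internally (and may eventually surface the failure wrapped in a RetriesExhaustedException), so the test harness does not need code like this. Purely as an illustrative sketch of handling the exception explicitly with a crude backoff, assuming a standard 2.x client; the class name RetryingPut and the cell value are made up, while the row key and A:col10 coordinates are taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingPut {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;                              // write accepted
            } catch (RegionTooBusyException e) {  // memstore is over its blocking limit
              if (attempt >= 5) throw e;          // give up after a few attempts
              Thread.sleep(100L * attempt);       // linear backoff while the flush drains
            }
          }
        }
      }
    }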
2024-11-07T17:16:55,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:55,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:55,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-07T17:16:55,234 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:55,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-07T17:16:55,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:55,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:55,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:55,235 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:55,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:55,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:55,289 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/130c19de8ecf4308ab65c13bcc1359ab 2024-11-07T17:16:55,331 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/9f73a7d2c0c84562bab8965e7897e8d7 is 50, key is test_row_0/B:col10/1730999814329/Put/seqid=0 2024-11-07T17:16:55,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742237_1413 (size=12151) 2024-11-07T17:16:55,368 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/9f73a7d2c0c84562bab8965e7897e8d7 2024-11-07T17:16:55,388 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:55,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-07T17:16:55,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:55,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:55,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:55,389 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:55,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:55,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:55,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/3cc7077826904aeea05c865d93155960 is 50, key is test_row_0/C:col10/1730999814329/Put/seqid=0 2024-11-07T17:16:55,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742238_1414 (size=12151) 2024-11-07T17:16:55,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/3cc7077826904aeea05c865d93155960 2024-11-07T17:16:55,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/130c19de8ecf4308ab65c13bcc1359ab as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/130c19de8ecf4308ab65c13bcc1359ab 2024-11-07T17:16:55,462 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/130c19de8ecf4308ab65c13bcc1359ab, entries=200, sequenceid=237, filesize=38.8 K 2024-11-07T17:16:55,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/9f73a7d2c0c84562bab8965e7897e8d7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/9f73a7d2c0c84562bab8965e7897e8d7 2024-11-07T17:16:55,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/9f73a7d2c0c84562bab8965e7897e8d7, entries=150, sequenceid=237, filesize=11.9 K 2024-11-07T17:16:55,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/3cc7077826904aeea05c865d93155960 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3cc7077826904aeea05c865d93155960 2024-11-07T17:16:55,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3cc7077826904aeea05c865d93155960, entries=150, sequenceid=237, filesize=11.9 K 
2024-11-07T17:16:55,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 73d52d69bc80a97d9d4aef7a7d44d969 in 1181ms, sequenceid=237, compaction requested=true 2024-11-07T17:16:55,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:55,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:16:55,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:55,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:55,513 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:55,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:55,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:55,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:55,513 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:55,514 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:55,514 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/A is initiating minor compaction (all files) 2024-11-07T17:16:55,514 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/A in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:55,515 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/2d75de595fd44740948445bd916471ff, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9cc37c8a37c44aafbc9bf506740259f1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/130c19de8ecf4308ab65c13bcc1359ab] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=100.0 K 2024-11-07T17:16:55,515 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:55,515 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/2d75de595fd44740948445bd916471ff, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9cc37c8a37c44aafbc9bf506740259f1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/130c19de8ecf4308ab65c13bcc1359ab] 2024-11-07T17:16:55,515 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:55,515 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/B is initiating minor compaction (all files) 2024-11-07T17:16:55,515 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/B in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:55,515 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/46326101af214751b036dadd8bd0d9ac, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/bee6d76922b1418e86d2e34da0b7e512, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/9f73a7d2c0c84562bab8965e7897e8d7] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=36.0 K 2024-11-07T17:16:55,517 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d75de595fd44740948445bd916471ff, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1730999811526 2024-11-07T17:16:55,517 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 46326101af214751b036dadd8bd0d9ac, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1730999811526 2024-11-07T17:16:55,517 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cc37c8a37c44aafbc9bf506740259f1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1730999812279 2024-11-07T17:16:55,517 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting bee6d76922b1418e86d2e34da0b7e512, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1730999812279 2024-11-07T17:16:55,517 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 130c19de8ecf4308ab65c13bcc1359ab, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1730999813126 2024-11-07T17:16:55,518 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f73a7d2c0c84562bab8965e7897e8d7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1730999813155 2024-11-07T17:16:55,545 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:55,545 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-07T17:16:55,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:55,546 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-07T17:16:55,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:55,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:55,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:55,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:55,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:55,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:55,547 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#B#compaction#345 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:55,548 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/a3902fa7ec38480083f614e240938497 is 50, key is test_row_0/B:col10/1730999814329/Put/seqid=0 2024-11-07T17:16:55,550 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:55,560 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107d459f88b3bc24744949a70ad1009d11c_73d52d69bc80a97d9d4aef7a7d44d969 store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:55,562 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107d459f88b3bc24744949a70ad1009d11c_73d52d69bc80a97d9d4aef7a7d44d969, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:55,563 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107d459f88b3bc24744949a70ad1009d11c_73d52d69bc80a97d9d4aef7a7d44d969 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:55,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:55,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:55,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742239_1415 (size=4469) 2024-11-07T17:16:55,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c5b61b92b3174e2293629db29c6fc247_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999815527/Put/seqid=0 2024-11-07T17:16:55,625 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#A#compaction#346 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:55,626 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/9ba7098337c84dd197240a1c819511fe is 175, key is test_row_0/A:col10/1730999814329/Put/seqid=0 2024-11-07T17:16:55,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742240_1416 (size=12697) 2024-11-07T17:16:55,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742241_1417 (size=31651) 2024-11-07T17:16:55,712 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/9ba7098337c84dd197240a1c819511fe as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9ba7098337c84dd197240a1c819511fe 2024-11-07T17:16:55,724 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/A of 73d52d69bc80a97d9d4aef7a7d44d969 into 9ba7098337c84dd197240a1c819511fe(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:55,725 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:55,725 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/A, priority=13, startTime=1730999815513; duration=0sec 2024-11-07T17:16:55,725 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:55,725 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:A 2024-11-07T17:16:55,725 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:55,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742242_1418 (size=12304) 2024-11-07T17:16:55,727 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:55,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:55,727 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] 
regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/C is initiating minor compaction (all files) 2024-11-07T17:16:55,727 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/C in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:55,727 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3bf941197a4f494b910fe70af537b346, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/398218e6a22e4faab9e861bdad770f47, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3cc7077826904aeea05c865d93155960] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=36.0 K 2024-11-07T17:16:55,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-07T17:16:55,728 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3bf941197a4f494b910fe70af537b346, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1730999811526 2024-11-07T17:16:55,728 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 398218e6a22e4faab9e861bdad770f47, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1730999812279 2024-11-07T17:16:55,729 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3cc7077826904aeea05c865d93155960, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1730999813155 2024-11-07T17:16:55,732 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c5b61b92b3174e2293629db29c6fc247_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c5b61b92b3174e2293629db29c6fc247_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:55,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/fd1a486af0b54316bee531dda46b355b, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:55,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/fd1a486af0b54316bee531dda46b355b is 175, key is test_row_0/A:col10/1730999815527/Put/seqid=0 2024-11-07T17:16:55,743 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#C#compaction#348 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:55,744 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/39b074b4c4a54f39883c287ff5bbd544 is 50, key is test_row_0/C:col10/1730999814329/Put/seqid=0 2024-11-07T17:16:55,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742243_1419 (size=31105) 2024-11-07T17:16:55,793 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=246, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/fd1a486af0b54316bee531dda46b355b 2024-11-07T17:16:55,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742244_1420 (size=12697) 2024-11-07T17:16:55,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/c8512bf27b744e00bd2da8296d047562 is 50, key is test_row_0/B:col10/1730999815527/Put/seqid=0 2024-11-07T17:16:55,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:55,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999875848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:55,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742245_1421 (size=12151) 2024-11-07T17:16:55,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:55,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999875950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:56,084 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/a3902fa7ec38480083f614e240938497 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/a3902fa7ec38480083f614e240938497 2024-11-07T17:16:56,090 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/B of 73d52d69bc80a97d9d4aef7a7d44d969 into a3902fa7ec38480083f614e240938497(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:56,090 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:56,090 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/B, priority=13, startTime=1730999815513; duration=0sec 2024-11-07T17:16:56,090 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:56,090 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:B 2024-11-07T17:16:56,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:56,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999876159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:56,209 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/39b074b4c4a54f39883c287ff5bbd544 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/39b074b4c4a54f39883c287ff5bbd544 2024-11-07T17:16:56,234 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/C of 73d52d69bc80a97d9d4aef7a7d44d969 into 39b074b4c4a54f39883c287ff5bbd544(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:16:56,234 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:56,234 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/C, priority=13, startTime=1730999815513; duration=0sec 2024-11-07T17:16:56,234 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:56,234 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:C 2024-11-07T17:16:56,254 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/c8512bf27b744e00bd2da8296d047562 2024-11-07T17:16:56,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/a74cdc15e9f84e2ebbc44c7ebc43b767 is 50, key is test_row_0/C:col10/1730999815527/Put/seqid=0 2024-11-07T17:16:56,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742246_1422 (size=12151) 2024-11-07T17:16:56,349 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/a74cdc15e9f84e2ebbc44c7ebc43b767 2024-11-07T17:16:56,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/fd1a486af0b54316bee531dda46b355b as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/fd1a486af0b54316bee531dda46b355b 2024-11-07T17:16:56,442 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/fd1a486af0b54316bee531dda46b355b, entries=150, sequenceid=246, filesize=30.4 K 2024-11-07T17:16:56,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/c8512bf27b744e00bd2da8296d047562 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/c8512bf27b744e00bd2da8296d047562 2024-11-07T17:16:56,461 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/c8512bf27b744e00bd2da8296d047562, entries=150, sequenceid=246, filesize=11.9 K 2024-11-07T17:16:56,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/a74cdc15e9f84e2ebbc44c7ebc43b767 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/a74cdc15e9f84e2ebbc44c7ebc43b767 2024-11-07T17:16:56,473 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/a74cdc15e9f84e2ebbc44c7ebc43b767, entries=150, sequenceid=246, filesize=11.9 K 2024-11-07T17:16:56,474 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 73d52d69bc80a97d9d4aef7a7d44d969 in 929ms, sequenceid=246, compaction requested=false 2024-11-07T17:16:56,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:56,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:56,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-07T17:16:56,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-07T17:16:56,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-07T17:16:56,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8530 sec 2024-11-07T17:16:56,478 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.8580 sec 2024-11-07T17:16:56,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:56,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-07T17:16:56,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:56,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:56,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:56,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:56,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:56,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:56,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:56,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999876503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:56,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411073f75a1946baa42d5bce3e121668b0262_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999815822/Put/seqid=0 2024-11-07T17:16:56,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742247_1423 (size=17534) 2024-11-07T17:16:56,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:56,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999876611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:56,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-07T17:16:56,730 INFO [Thread-1592 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-07T17:16:56,732 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:56,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-07T17:16:56,743 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:56,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-07T17:16:56,744 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:56,744 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:56,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:56,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999876821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:56,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-07T17:16:56,896 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:56,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-07T17:16:56,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:56,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:56,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:56,897 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:56,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:56,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:56,973 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:56,999 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411073f75a1946baa42d5bce3e121668b0262_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411073f75a1946baa42d5bce3e121668b0262_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:57,001 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/44f4509781ac491595a5267e65b0b635, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:57,002 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/44f4509781ac491595a5267e65b0b635 is 175, key is test_row_0/A:col10/1730999815822/Put/seqid=0 2024-11-07T17:16:57,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742248_1424 (size=48639) 2024-11-07T17:16:57,027 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=278, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/44f4509781ac491595a5267e65b0b635 2024-11-07T17:16:57,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/ba79ac6f28d545b79ebf8287942c2875 is 50, key is test_row_0/B:col10/1730999815822/Put/seqid=0 2024-11-07T17:16:57,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-07T17:16:57,053 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:57,053 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-07T17:16:57,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:57,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:57,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:57,054 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:57,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:57,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:57,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742249_1425 (size=12301) 2024-11-07T17:16:57,060 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/ba79ac6f28d545b79ebf8287942c2875 2024-11-07T17:16:57,074 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/2a700339086a40ba818dcf828d4d70e2 is 50, key is test_row_0/C:col10/1730999815822/Put/seqid=0 2024-11-07T17:16:57,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742250_1426 (size=12301) 2024-11-07T17:16:57,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/2a700339086a40ba818dcf828d4d70e2 2024-11-07T17:16:57,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/44f4509781ac491595a5267e65b0b635 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/44f4509781ac491595a5267e65b0b635 2024-11-07T17:16:57,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/44f4509781ac491595a5267e65b0b635, entries=250, sequenceid=278, filesize=47.5 K 2024-11-07T17:16:57,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/ba79ac6f28d545b79ebf8287942c2875 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ba79ac6f28d545b79ebf8287942c2875 2024-11-07T17:16:57,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ba79ac6f28d545b79ebf8287942c2875, entries=150, sequenceid=278, filesize=12.0 K 2024-11-07T17:16:57,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/2a700339086a40ba818dcf828d4d70e2 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2a700339086a40ba818dcf828d4d70e2 2024-11-07T17:16:57,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,127 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2a700339086a40ba818dcf828d4d70e2, entries=150, sequenceid=278, filesize=12.0 K 2024-11-07T17:16:57,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 73d52d69bc80a97d9d4aef7a7d44d969 in 648ms, sequenceid=278, compaction requested=true 2024-11-07T17:16:57,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:57,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:16:57,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:57,128 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:57,128 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:57,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,130 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111395 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:57,130 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] 
regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/A is initiating minor compaction (all files) 2024-11-07T17:16:57,130 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/A in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:57,130 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9ba7098337c84dd197240a1c819511fe, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/fd1a486af0b54316bee531dda46b355b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/44f4509781ac491595a5267e65b0b635] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=108.8 K 2024-11-07T17:16:57,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,130 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:57,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:57,130 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9ba7098337c84dd197240a1c819511fe, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/fd1a486af0b54316bee531dda46b355b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/44f4509781ac491595a5267e65b0b635] 2024-11-07T17:16:57,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:57,131 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:57,131 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/B is initiating minor compaction (all files) 2024-11-07T17:16:57,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:57,131 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/B in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
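[Editor's note, not part of the test output] The flush above left three store files in each of stores A, B and C of region 73d52d69bc80a97d9d4aef7a7d44d969, so the ExploringCompactionPolicy selects all three A files (111395 bytes, 108.8 K) and all three B files (37149 bytes, 36.3 K) for system-requested minor compactions. Compactions can also be requested explicitly from a client; a minimal sketch, with the table and family names taken from the log and the calls being the standard Admin API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Queue a (minor) compaction for one column family; the server still picks the files.
          admin.compact(table, Bytes.toBytes("A"));
          // Or rewrite every store of every family into a single file per store.
          admin.majorCompact(table);
        }
      }
    }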
2024-11-07T17:16:57,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:57,131 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/a3902fa7ec38480083f614e240938497, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/c8512bf27b744e00bd2da8296d047562, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ba79ac6f28d545b79ebf8287942c2875] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=36.3 K 2024-11-07T17:16:57,131 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ba7098337c84dd197240a1c819511fe, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1730999813155 2024-11-07T17:16:57,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,131 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting a3902fa7ec38480083f614e240938497, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1730999813155 2024-11-07T17:16:57,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,132 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd1a486af0b54316bee531dda46b355b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1730999814355 2024-11-07T17:16:57,132 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting c8512bf27b744e00bd2da8296d047562, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1730999814355 2024-11-07T17:16:57,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,132 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44f4509781ac491595a5267e65b0b635, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1730999815742 2024-11-07T17:16:57,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,132 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 
ba79ac6f28d545b79ebf8287942c2875, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1730999815822 2024-11-07T17:16:57,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,149 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#B#compaction#354 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:57,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,149 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/85af3b1e81ec40a09c710262746e9778 is 50, key is test_row_0/B:col10/1730999815822/Put/seqid=0 2024-11-07T17:16:57,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,161 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:57,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742251_1427 (size=12949) 2024-11-07T17:16:57,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,201 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107b32f6ddb96b14c6785d8c66e4a0bdc96_73d52d69bc80a97d9d4aef7a7d44d969 store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:57,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,202 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107b32f6ddb96b14c6785d8c66e4a0bdc96_73d52d69bc80a97d9d4aef7a7d44d969, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:57,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,203 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107b32f6ddb96b14c6785d8c66e4a0bdc96_73d52d69bc80a97d9d4aef7a7d44d969 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:57,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,205 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,208 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:57,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-07T17:16:57,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:57,209 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-07T17:16:57,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:57,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:57,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:57,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:57,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:57,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:57,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[repeated DEBUG entries: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (RpcServer.default.FPBQ.Fifo handlers 0/2, port=37403), 2024-11-07T17:16:57,221-228]
2024-11-07T17:16:57,229 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/85af3b1e81ec40a09c710262746e9778 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/85af3b1e81ec40a09c710262746e9778
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 0-2, 17:16:57,229-236]
2024-11-07T17:16:57,236 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/B of 73d52d69bc80a97d9d4aef7a7d44d969 into 85af3b1e81ec40a09c710262746e9778(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-07T17:16:57,236 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969:
2024-11-07T17:16:57,236 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/B, priority=13, startTime=1730999817128; duration=0sec
2024-11-07T17:16:57,236 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-07T17:16:57,236 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:B
2024-11-07T17:16:57,236 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 0/1, 17:16:57,236-237]
2024-11-07T17:16:57,237 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-07T17:16:57,237 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/C is initiating minor compaction (all files)
2024-11-07T17:16:57,237 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/C in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.
2024-11-07T17:16:57,237 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/39b074b4c4a54f39883c287ff5bbd544, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/a74cdc15e9f84e2ebbc44c7ebc43b767, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2a700339086a40ba818dcf828d4d70e2] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=36.3 K
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 0/1, 17:16:57,238]
2024-11-07T17:16:57,238 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 39b074b4c4a54f39883c287ff5bbd544, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1730999813155
2024-11-07T17:16:57,238 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting a74cdc15e9f84e2ebbc44c7ebc43b767, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1730999814355
[repeated StoreFileTrackerFactory(122) DEBUG entries, handler 1, 17:16:57,238]
2024-11-07T17:16:57,239 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a700339086a40ba818dcf828d4d70e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1730999815822
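The three Compactor(224) lines above are consistent with the totalSize=36.3 K reported by HStore(1176) and with the 37149-byte figure reported by ExploringCompactionPolicy(116): 12.4 K + 11.9 K + 12.0 K = 36.3 K, and 37149 / 1024 is roughly 36.3 KiB. A minimal standalone sketch of that arithmetic (plain Java for illustration; the class name is made up and this is not HBase code):

    public class CompactionSizeCheck {
        public static void main(String[] args) {
            long selectedBytes = 37149L;                 // "selected 3 files of size 37149"
            double sumOfInputsKib = 12.4 + 11.9 + 12.0;  // sizes logged for 39b074b4..., a74cdc15..., 2a700339...
            // Prints: selected = 36.3 K, sum of inputs = 36.3 K (matching totalSize=36.3 K)
            System.out.printf("selected = %.1f K, sum of inputs = %.1f K%n",
                selectedBytes / 1024.0, sumOfInputsKib);
        }
    }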
[repeated StoreFileTrackerFactory(122) DEBUG entries: instantiating StoreFileTracker impl DefaultStoreFileTracker, RpcServer handlers 0-2, 2024-11-07T17:16:57,239-259]
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 0/2, 17:16:57,259-261]
2024-11-07T17:16:57,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c839d99ac76d4ec9bf653076334f5cc7_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999816492/Put/seqid=0
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 1/2, 17:16:57,262-275]
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 0-2, 17:16:57,275-296]
2024-11-07T17:16:57,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742252_1428 (size=4469)
[repeated StoreFileTrackerFactory(122) DEBUG entries, handler 1, 17:16:57,297-298]
2024-11-07T17:16:57,298 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#A#compaction#355 average throughput is 0.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
[repeated StoreFileTrackerFactory(122) DEBUG entries, handler 1, 17:16:57,298]
2024-11-07T17:16:57,299 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/5acd73736d4a422f9fbd40fbdfe9b244 is 175, key is test_row_0/A:col10/1730999815822/Put/seqid=0
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 0-2, 17:16:57,299-302]
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 0-2, 17:16:57,302-304]
2024-11-07T17:16:57,305 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#C#compaction#357 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
[repeated StoreFileTrackerFactory(122) DEBUG entries, handler 2, 17:16:57,305]
2024-11-07T17:16:57,305 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/e78f4e4304874b96ae5584667ab4a1f7 is 50, key is test_row_0/C:col10/1730999815822/Put/seqid=0
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 0-2, 17:16:57,305-320]
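Both PressureAwareThroughputController(145) lines above report slept 0 time(s), which is consistent with the observed compaction throughput (0.18 MB/second and 3.28 MB/second) staying well below the configured 50.00 MB/second limit, so no throttling pauses were needed. A trivial illustrative check (plain Java, not the controller's actual logic):

    public class ThroughputLimitCheck {
        public static void main(String[] args) {
            double limitMBps = 50.00; // "total limit is 50.00 MB/second"
            for (double observedMBps : new double[] {0.18, 3.28}) {
                // Neither observed rate exceeds the limit, so zero sleeps is the expected outcome.
                System.out.println(observedMBps + " MB/s exceeds limit? " + (observedMBps > limitMBps));
            }
        }
    }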
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 0-2, 17:16:57,320-329]
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 0-2, 17:16:57,329-330]
2024-11-07T17:16:57,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742253_1429 (size=9914)
2024-11-07T17:16:57,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 0-2, 17:16:57,334-338]
2024-11-07T17:16:57,338 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c839d99ac76d4ec9bf653076334f5cc7_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c839d99ac76d4ec9bf653076334f5cc7_73d52d69bc80a97d9d4aef7a7d44d969
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 0/1, 17:16:57,338]
2024-11-07T17:16:57,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/152d5fac8ffe4d6abcd272d140a829de, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969]
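The HMobStore(268) entry above is the commit step of the MOB flush: the file written under mobdir/.tmp is renamed into its permanent location under mobdir/data. A minimal sketch of that commit-by-rename pattern against HDFS using the public Hadoop FileSystem API (the class name and paths are shortened, hypothetical stand-ins; this is not the actual HMobStore implementation):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitByRename {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:39903"); // namenode address from the log
            FileSystem fs = FileSystem.get(conf);
            // Hypothetical stand-ins for the mobdir/.tmp and mobdir/data paths in the log.
            Path tmp = new Path("/user/jenkins/test-data/mobdir/.tmp/flushed-mob-file");
            Path committed = new Path("/user/jenkins/test-data/mobdir/data/default/TestAcidGuarantees/flushed-mob-file");
            fs.mkdirs(committed.getParent());   // make sure the target directory exists
            if (!fs.rename(tmp, committed)) {   // the rename itself is the commit
                throw new java.io.IOException("failed to rename " + tmp + " to " + committed);
            }
        }
    }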
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 0/1, 17:16:57,339-340]
2024-11-07T17:16:57,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/152d5fac8ffe4d6abcd272d140a829de is 175, key is test_row_0/A:col10/1730999816492/Put/seqid=0
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 0/2, 17:16:57,345-347]
2024-11-07T17:16:57,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118
[repeated StoreFileTrackerFactory(122) DEBUG entries, handlers 0/2, 17:16:57,347-352]
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742256_1432 (size=22561) 2024-11-07T17:16:57,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:16:57,417 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=285, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/152d5fac8ffe4d6abcd272d140a829de 2024-11-07T17:16:57,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 
{event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/2e8ae0b0f7674a79aec4d48d63798091 is 50, key is test_row_0/B:col10/1730999816492/Put/seqid=0 2024-11-07T17:16:57,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742254_1430 (size=31903) 2024-11-07T17:16:57,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:16:57,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742255_1431 (size=12949) 2024-11-07T17:16:57,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,442 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/e78f4e4304874b96ae5584667ab4a1f7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/e78f4e4304874b96ae5584667ab4a1f7 2024-11-07T17:16:57,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,453 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/C of 73d52d69bc80a97d9d4aef7a7d44d969 into e78f4e4304874b96ae5584667ab4a1f7(size=12.6 K), total size for store is 12.6 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:57,453 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:57,453 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/C, priority=13, startTime=1730999817130; duration=0sec 2024-11-07T17:16:57,453 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:57,453 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:C 2024-11-07T17:16:57,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,459 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742257_1433 (size=9857) 2024-11-07T17:16:57,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,460 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/2e8ae0b0f7674a79aec4d48d63798091 2024-11-07T17:16:57,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/df1018e0d3594d1485fc092811640f2a is 50, key is test_row_0/C:col10/1730999816492/Put/seqid=0 2024-11-07T17:16:57,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:16:57,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
(last message repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 on port 37403 through 2024-11-07T17:16:57,508) 
2024-11-07T17:16:57,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 
2024-11-07T17:16:57,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:16:57,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 
2024-11-07T17:16:57,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
(last message repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 through 2024-11-07T17:16:57,541) 
2024-11-07T17:16:57,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742258_1434 (size=9857) 
2024-11-07T17:16:57,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
(last message repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 through 2024-11-07T17:16:57,593) 
2024-11-07T17:16:57,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:57,845 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/5acd73736d4a422f9fbd40fbdfe9b244 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/5acd73736d4a422f9fbd40fbdfe9b244 2024-11-07T17:16:57,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-07T17:16:57,849 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/A of 73d52d69bc80a97d9d4aef7a7d44d969 into 5acd73736d4a422f9fbd40fbdfe9b244(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:16:57,849 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:57,849 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/A, priority=13, startTime=1730999817128; duration=0sec 2024-11-07T17:16:57,849 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:57,849 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:A 2024-11-07T17:16:57,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:57,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999877906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:57,947 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/df1018e0d3594d1485fc092811640f2a 2024-11-07T17:16:57,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/152d5fac8ffe4d6abcd272d140a829de as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/152d5fac8ffe4d6abcd272d140a829de 2024-11-07T17:16:57,976 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/152d5fac8ffe4d6abcd272d140a829de, entries=100, sequenceid=285, filesize=22.0 K 2024-11-07T17:16:57,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/2e8ae0b0f7674a79aec4d48d63798091 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2e8ae0b0f7674a79aec4d48d63798091 2024-11-07T17:16:57,980 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2e8ae0b0f7674a79aec4d48d63798091, entries=100, sequenceid=285, filesize=9.6 K 2024-11-07T17:16:57,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/df1018e0d3594d1485fc092811640f2a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/df1018e0d3594d1485fc092811640f2a 2024-11-07T17:16:57,990 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/df1018e0d3594d1485fc092811640f2a, entries=100, sequenceid=285, filesize=9.6 K 2024-11-07T17:16:57,990 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for 73d52d69bc80a97d9d4aef7a7d44d969 in 781ms, sequenceid=285, compaction requested=false 2024-11-07T17:16:57,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:57,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:57,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-07T17:16:57,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-07T17:16:57,994 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-07T17:16:57,994 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2490 sec 2024-11-07T17:16:57,996 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 1.2630 sec 2024-11-07T17:16:58,026 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-07T17:16:58,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:58,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:58,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:58,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:58,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:58,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-07T17:16:58,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:58,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107678ccf16dcf04c049c9ccf2b6a69db76_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999818024/Put/seqid=0 2024-11-07T17:16:58,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:58,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 296 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999878079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:58,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742259_1435 (size=14994) 2024-11-07T17:16:58,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:58,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 298 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999878198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:58,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:58,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 300 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999878409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:58,556 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:58,562 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107678ccf16dcf04c049c9ccf2b6a69db76_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107678ccf16dcf04c049c9ccf2b6a69db76_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:58,566 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/15ec1c624fa9485cb6c805f9f8bedcf0, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:58,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/15ec1c624fa9485cb6c805f9f8bedcf0 is 175, key is test_row_0/A:col10/1730999818024/Put/seqid=0 2024-11-07T17:16:58,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742260_1436 (size=39949) 2024-11-07T17:16:58,581 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=318, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/15ec1c624fa9485cb6c805f9f8bedcf0 2024-11-07T17:16:58,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/6351b97ffa30420ea26f0445191c0131 is 50, key is test_row_0/B:col10/1730999818024/Put/seqid=0 2024-11-07T17:16:58,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742261_1437 
(size=12301) 2024-11-07T17:16:58,637 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/6351b97ffa30420ea26f0445191c0131 2024-11-07T17:16:58,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/2d98a32b1e384d26aa63dc31cd059f1f is 50, key is test_row_0/C:col10/1730999818024/Put/seqid=0 2024-11-07T17:16:58,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742262_1438 (size=12301) 2024-11-07T17:16:58,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:58,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 302 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999878716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:58,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-07T17:16:58,849 INFO [Thread-1592 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-07T17:16:58,850 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:16:58,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-11-07T17:16:58,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-07T17:16:58,851 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:16:58,852 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:16:58,852 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:16:58,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-07T17:16:59,004 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:59,004 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-07T17:16:59,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:59,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:59,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:59,005 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:16:59,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:16:59,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=121
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
	at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
	at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T17:16:59,065 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/2d98a32b1e384d26aa63dc31cd059f1f
2024-11-07T17:16:59,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/15ec1c624fa9485cb6c805f9f8bedcf0 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/15ec1c624fa9485cb6c805f9f8bedcf0
2024-11-07T17:16:59,098 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/15ec1c624fa9485cb6c805f9f8bedcf0, entries=200, sequenceid=318, filesize=39.0 K
2024-11-07T17:16:59,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/6351b97ffa30420ea26f0445191c0131 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/6351b97ffa30420ea26f0445191c0131
2024-11-07T17:16:59,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:16:59,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:16:59,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:16:59,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/6351b97ffa30420ea26f0445191c0131, entries=150, sequenceid=318, filesize=12.0 K
2024-11-07T17:16:59,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/2d98a32b1e384d26aa63dc31cd059f1f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2d98a32b1e384d26aa63dc31cd059f1f
2024-11-07T17:16:59,119 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2d98a32b1e384d26aa63dc31cd059f1f, entries=150, sequenceid=318, filesize=12.0 K
2024-11-07T17:16:59,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 73d52d69bc80a97d9d4aef7a7d44d969 in 1094ms, sequenceid=318, compaction requested=true 2024-11-07T17:16:59,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:59,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:16:59,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:59,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:16:59,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-07T17:16:59,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:16:59,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-07T17:16:59,121 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:59,121 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:59,122 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:59,122 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/C is initiating minor compaction (all files) 2024-11-07T17:16:59,122 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/C in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:59,122 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/e78f4e4304874b96ae5584667ab4a1f7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/df1018e0d3594d1485fc092811640f2a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2d98a32b1e384d26aa63dc31cd059f1f] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=34.3 K 2024-11-07T17:16:59,122 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94413 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:59,123 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/A is initiating minor compaction (all files) 2024-11-07T17:16:59,123 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/A in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:59,123 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/5acd73736d4a422f9fbd40fbdfe9b244, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/152d5fac8ffe4d6abcd272d140a829de, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/15ec1c624fa9485cb6c805f9f8bedcf0] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=92.2 K 2024-11-07T17:16:59,123 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:16:59,123 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/5acd73736d4a422f9fbd40fbdfe9b244, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/152d5fac8ffe4d6abcd272d140a829de, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/15ec1c624fa9485cb6c805f9f8bedcf0] 2024-11-07T17:16:59,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,136 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting e78f4e4304874b96ae5584667ab4a1f7, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1730999815822 2024-11-07T17:16:59,136 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5acd73736d4a422f9fbd40fbdfe9b244, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1730999815822 2024-11-07T17:16:59,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,136 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 152d5fac8ffe4d6abcd272d140a829de, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1730999816492 2024-11-07T17:16:59,136 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting df1018e0d3594d1485fc092811640f2a, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1730999816492 2024-11-07T17:16:59,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,137 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d98a32b1e384d26aa63dc31cd059f1f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1730999817852 2024-11-07T17:16:59,137 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15ec1c624fa9485cb6c805f9f8bedcf0, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1730999817852 2024-11-07T17:16:59,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,144 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:59,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,146 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107d9d576a7c327489f84c7d434ce075f56_73d52d69bc80a97d9d4aef7a7d44d969 store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:59,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,148 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107d9d576a7c327489f84c7d434ce075f56_73d52d69bc80a97d9d4aef7a7d44d969, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:59,148 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107d9d576a7c327489f84c7d434ce075f56_73d52d69bc80a97d9d4aef7a7d44d969 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:59,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-07T17:16:59,153 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,157 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:16:59,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,157 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-07T17:16:59,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:59,158 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-07T17:16:59,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:16:59,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:59,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:16:59,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:59,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:16:59,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:16:59,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,165 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#C#compaction#364 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:59,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,166 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/acdce5bf56cc4a1eba47aea4bebac0f3 is 50, key is test_row_0/C:col10/1730999818024/Put/seqid=0 2024-11-07T17:16:59,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c892cdbe2d644e7089986a41989e362a_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999818056/Put/seqid=0 
2024-11-07T17:16:59,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742264_1440 (size=13051) 2024-11-07T17:16:59,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742263_1439 (size=4469) 2024-11-07T17:16:59,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,207 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#A#compaction#363 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:59,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,207 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/17faae1995b14d86a311fcde16e16f2e is 175, key is test_row_0/A:col10/1730999818024/Put/seqid=0 2024-11-07T17:16:59,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742265_1441 (size=9914) 2024-11-07T17:16:59,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,212 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,221 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,226 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,230 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,233 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,238 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,249 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,254 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,257 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742266_1442 (size=32005) 2024-11-07T17:16:59,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,267 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/17faae1995b14d86a311fcde16e16f2e as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/17faae1995b14d86a311fcde16e16f2e 2024-11-07T17:16:59,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,272 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/A of 73d52d69bc80a97d9d4aef7a7d44d969 into 17faae1995b14d86a311fcde16e16f2e(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:16:59,272 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:59,273 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/A, priority=13, startTime=1730999819120; duration=0sec 2024-11-07T17:16:59,273 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:16:59,273 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:A 2024-11-07T17:16:59,273 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:16:59,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,274 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:16:59,274 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/B is initiating minor compaction (all files) 2024-11-07T17:16:59,274 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/B in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:16:59,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,274 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/85af3b1e81ec40a09c710262746e9778, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2e8ae0b0f7674a79aec4d48d63798091, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/6351b97ffa30420ea26f0445191c0131] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=34.3 K 2024-11-07T17:16:59,274 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85af3b1e81ec40a09c710262746e9778, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1730999815822 2024-11-07T17:16:59,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,275 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e8ae0b0f7674a79aec4d48d63798091, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1730999816492 2024-11-07T17:16:59,275 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6351b97ffa30420ea26f0445191c0131, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1730999817852 2024-11-07T17:16:59,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,281 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#B#compaction#366 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:16:59,282 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/be01ba2b216f4779b77d6e97e2622837 is 50, key is test_row_0/B:col10/1730999818024/Put/seqid=0 2024-11-07T17:16:59,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742267_1443 (size=13051) 2024-11-07T17:16:59,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,303 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/be01ba2b216f4779b77d6e97e2622837 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/be01ba2b216f4779b77d6e97e2622837 2024-11-07T17:16:59,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,309 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/B of 73d52d69bc80a97d9d4aef7a7d44d969 into be01ba2b216f4779b77d6e97e2622837(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:16:59,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,309 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:59,309 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/B, priority=13, startTime=1730999819120; duration=0sec 2024-11-07T17:16:59,309 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:59,309 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:B 2024-11-07T17:16:59,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. as already flushing 2024-11-07T17:16:59,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:59,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-07T17:16:59,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 331 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999879593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:59,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:16:59,613 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/acdce5bf56cc4a1eba47aea4bebac0f3 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/acdce5bf56cc4a1eba47aea4bebac0f3 2024-11-07T17:16:59,615 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c892cdbe2d644e7089986a41989e362a_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c892cdbe2d644e7089986a41989e362a_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:16:59,618 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/C of 73d52d69bc80a97d9d4aef7a7d44d969 into acdce5bf56cc4a1eba47aea4bebac0f3(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
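The repeated "Over memstore limit=512.0 K" RegionTooBusyException entries above are thrown from HRegion.checkResources (HRegion.java:5067 in these traces) once the region's memstore passes its blocking size, which HBase derives from the per-region flush threshold times the block multiplier. A minimal sketch of the two settings involved; the concrete numbers below are assumptions chosen only to reproduce a 512 KB limit and are not read from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitExample {
    public static void main(String[] args) {
        // Hypothetical values for illustration; the log does not state what the test configured.
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore flush threshold, in bytes.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // Writes are rejected with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier, i.e. 128 KB * 4 = 512 KB with these values.
        conf.setLong("hbase.hregion.memstore.block.multiplier", 4);
    }
}
```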
2024-11-07T17:16:59,618 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:16:59,618 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/C, priority=13, startTime=1730999819120; duration=0sec 2024-11-07T17:16:59,618 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:16:59,618 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:C 2024-11-07T17:16:59,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/50b02fb78bd8408da6c9223c5232b7bf, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:16:59,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/50b02fb78bd8408da6c9223c5232b7bf is 175, key is test_row_0/A:col10/1730999818056/Put/seqid=0 2024-11-07T17:16:59,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742268_1444 (size=22561) 2024-11-07T17:16:59,667 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=324, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/50b02fb78bd8408da6c9223c5232b7bf 2024-11-07T17:16:59,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/ea00e0aa44e84651a21241cf3780a1d0 is 50, key is test_row_0/B:col10/1730999818056/Put/seqid=0 2024-11-07T17:16:59,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:59,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 333 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999879703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:59,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742269_1445 (size=9857) 2024-11-07T17:16:59,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:16:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 335 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999879919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:16:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-07T17:17:00,084 DEBUG [Thread-1593 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x27539bdc to 127.0.0.1:64938 2024-11-07T17:17:00,084 DEBUG [Thread-1593 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:00,086 DEBUG [Thread-1599 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7284f16d to 127.0.0.1:64938 2024-11-07T17:17:00,086 DEBUG [Thread-1599 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:00,088 DEBUG [Thread-1597 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x798e7fd4 to 127.0.0.1:64938 2024-11-07T17:17:00,088 DEBUG [Thread-1597 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:00,091 DEBUG [Thread-1595 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e3203d9 to 127.0.0.1:64938 2024-11-07T17:17:00,091 DEBUG [Thread-1595 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:00,091 DEBUG [Thread-1601 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x37a637ac to 127.0.0.1:64938 2024-11-07T17:17:00,092 DEBUG [Thread-1601 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:00,149 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/ea00e0aa44e84651a21241cf3780a1d0 2024-11-07T17:17:00,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/3a4a4f7151c4496abb771a1cef7215ec is 50, key is test_row_0/C:col10/1730999818056/Put/seqid=0 2024-11-07T17:17:00,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742270_1446 (size=9857) 2024-11-07T17:17:00,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:00,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 337 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46582 deadline: 1730999880227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:00,569 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/3a4a4f7151c4496abb771a1cef7215ec 2024-11-07T17:17:00,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/50b02fb78bd8408da6c9223c5232b7bf as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/50b02fb78bd8408da6c9223c5232b7bf 2024-11-07T17:17:00,577 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/50b02fb78bd8408da6c9223c5232b7bf, entries=100, sequenceid=324, filesize=22.0 K 2024-11-07T17:17:00,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/ea00e0aa44e84651a21241cf3780a1d0 as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ea00e0aa44e84651a21241cf3780a1d0 2024-11-07T17:17:00,597 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ea00e0aa44e84651a21241cf3780a1d0, entries=100, sequenceid=324, filesize=9.6 K 2024-11-07T17:17:00,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/3a4a4f7151c4496abb771a1cef7215ec as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3a4a4f7151c4496abb771a1cef7215ec 2024-11-07T17:17:00,602 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3a4a4f7151c4496abb771a1cef7215ec, entries=100, sequenceid=324, filesize=9.6 K 2024-11-07T17:17:00,603 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for 73d52d69bc80a97d9d4aef7a7d44d969 in 1445ms, sequenceid=324, compaction requested=false 2024-11-07T17:17:00,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:17:00,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
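The flush that just finished for region 73d52d69bc80a97d9d4aef7a7d44d969 ran as FlushRegionProcedure pid=121 under the table-level FlushTableProcedure pid=120, whose completion is recorded just below. A minimal sketch of requesting the same table-wide flush (and, optionally, a follow-up major compaction) through the Admin API; connection setup is assumed, not taken from the test harness.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Triggers a table-wide flush, run by the master as a FlushTableProcedure
            // with one FlushRegionProcedure per region (pid=120 / pid=121 above).
            admin.flush(table);
            // Optionally request a major compaction of all stores afterwards.
            admin.majorCompact(table);
        }
    }
}
```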
2024-11-07T17:17:00,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-07T17:17:00,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-11-07T17:17:00,606 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-07T17:17:00,607 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7530 sec 2024-11-07T17:17:00,609 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 1.7570 sec 2024-11-07T17:17:00,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:00,735 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-11-07T17:17:00,735 DEBUG [Thread-1584 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x117e86d9 to 127.0.0.1:64938 2024-11-07T17:17:00,735 DEBUG [Thread-1584 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:00,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:17:00,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:00,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:17:00,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:00,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:17:00,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:00,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107287b6d6d10ef4b2c9da357b03425ca28_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999819579/Put/seqid=0 2024-11-07T17:17:00,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742271_1447 (size=12454) 2024-11-07T17:17:00,767 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:00,771 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107287b6d6d10ef4b2c9da357b03425ca28_73d52d69bc80a97d9d4aef7a7d44d969 to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107287b6d6d10ef4b2c9da357b03425ca28_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:00,772 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/cd769ed8a4a449ce811c89ca54d741d8, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:17:00,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/cd769ed8a4a449ce811c89ca54d741d8 is 175, key is test_row_0/A:col10/1730999819579/Put/seqid=0 2024-11-07T17:17:00,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742272_1448 (size=31255) 2024-11-07T17:17:00,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-07T17:17:00,964 INFO [Thread-1592 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-07T17:17:01,159 DEBUG [Thread-1586 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6cd96549 to 127.0.0.1:64938 2024-11-07T17:17:01,160 DEBUG [Thread-1586 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:01,161 DEBUG [Thread-1588 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31aea41b to 127.0.0.1:64938 2024-11-07T17:17:01,161 DEBUG [Thread-1588 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:01,176 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=358, memsize=62.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/cd769ed8a4a449ce811c89ca54d741d8 2024-11-07T17:17:01,191 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/ac51f2d770394540b11013ed754cfcc7 is 50, key is test_row_0/B:col10/1730999819579/Put/seqid=0 2024-11-07T17:17:01,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:01,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46662 deadline: 1730999881192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:01,193 DEBUG [Thread-1582 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18254 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., hostname=3a0fde618c86,37403,1730999712734, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T17:17:01,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:01,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46642 deadline: 1730999881195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:01,196 DEBUG [Thread-1590 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18266 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., hostname=3a0fde618c86,37403,1730999712734, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T17:17:01,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742273_1449 (size=12301) 
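The client-side traces above come from AcidGuaranteesTestTool$AtomicityWriter calling HTable.put on row 'test_row_1', with RpcRetryingCallerImpl absorbing the RegionTooBusyException and retrying (tries=8 of retries=16, paced by the client pause with backoff). A minimal sketch of an equivalent multi-family write with the retry budget set explicitly; the row and family names mirror the test, but the configuration values and payload are assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Retry budget used by RpcRetryingCallerImpl; values here are illustrative.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100); // base pause in ms, grows with backoff
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            byte[] value = Bytes.toBytes("some-value"); // placeholder payload
            // A single Put spanning families A, B and C is applied to the row atomically,
            // which is the property the ACID-guarantees writer exercises.
            Put put = new Put(Bytes.toBytes("test_row_1"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
            table.put(put); // RegionTooBusyException is retried internally up to the budget above
        }
    }
}
```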
2024-11-07T17:17:01,208 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/ac51f2d770394540b11013ed754cfcc7 2024-11-07T17:17:01,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/8d00cd27506942cb84478b63f89ddf93 is 50, key is test_row_0/C:col10/1730999819579/Put/seqid=0 2024-11-07T17:17:01,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742274_1450 (size=12301) 2024-11-07T17:17:01,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/8d00cd27506942cb84478b63f89ddf93 2024-11-07T17:17:01,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/cd769ed8a4a449ce811c89ca54d741d8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/cd769ed8a4a449ce811c89ca54d741d8 2024-11-07T17:17:01,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/cd769ed8a4a449ce811c89ca54d741d8, entries=150, sequenceid=358, filesize=30.5 K 2024-11-07T17:17:01,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/ac51f2d770394540b11013ed754cfcc7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ac51f2d770394540b11013ed754cfcc7 2024-11-07T17:17:01,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ac51f2d770394540b11013ed754cfcc7, entries=150, sequenceid=358, filesize=12.0 K 2024-11-07T17:17:01,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/8d00cd27506942cb84478b63f89ddf93 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/8d00cd27506942cb84478b63f89ddf93 2024-11-07T17:17:01,661 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/8d00cd27506942cb84478b63f89ddf93, entries=150, sequenceid=358, filesize=12.0 K 2024-11-07T17:17:01,664 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for 73d52d69bc80a97d9d4aef7a7d44d969 in 930ms, sequenceid=358, compaction requested=true 2024-11-07T17:17:01,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:17:01,664 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:01,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:01,666 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85821 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:01,666 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/A is initiating minor compaction (all files) 2024-11-07T17:17:01,666 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/A in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:17:01,666 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/17faae1995b14d86a311fcde16e16f2e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/50b02fb78bd8408da6c9223c5232b7bf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/cd769ed8a4a449ce811c89ca54d741d8] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=83.8 K 2024-11-07T17:17:01,666 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:17:01,666 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/17faae1995b14d86a311fcde16e16f2e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/50b02fb78bd8408da6c9223c5232b7bf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/cd769ed8a4a449ce811c89ca54d741d8] 2024-11-07T17:17:01,666 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17faae1995b14d86a311fcde16e16f2e, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1730999817852 2024-11-07T17:17:01,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:01,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:17:01,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:01,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73d52d69bc80a97d9d4aef7a7d44d969:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:01,667 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:01,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:01,668 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50b02fb78bd8408da6c9223c5232b7bf, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1730999818042 2024-11-07T17:17:01,668 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd769ed8a4a449ce811c89ca54d741d8, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1730999819481 2024-11-07T17:17:01,669 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:01,669 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/B is initiating minor compaction (all files) 2024-11-07T17:17:01,669 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/B in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
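The SortedCompactionPolicy / ExploringCompactionPolicy entries here select 3 eligible store files (with 16 files as the blocking threshold) for minor compactions of stores A and B. A minimal sketch of the settings that drive this selection; the values shown are illustrative, default-style assumptions rather than what this test set.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum and maximum number of store files considered in one minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // A store with this many files blocks further flushes ("16 blocking" above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        // Size ratio used by ExploringCompactionPolicy when weighing candidate file sets.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2F);
    }
}
```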
2024-11-07T17:17:01,669 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/be01ba2b216f4779b77d6e97e2622837, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ea00e0aa44e84651a21241cf3780a1d0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ac51f2d770394540b11013ed754cfcc7] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=34.4 K 2024-11-07T17:17:01,670 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting be01ba2b216f4779b77d6e97e2622837, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1730999817852 2024-11-07T17:17:01,670 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting ea00e0aa44e84651a21241cf3780a1d0, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1730999818042 2024-11-07T17:17:01,671 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting ac51f2d770394540b11013ed754cfcc7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1730999819481 2024-11-07T17:17:01,677 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:17:01,680 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024110708eaf2068f7443e2b8ef519d6430edf1_73d52d69bc80a97d9d4aef7a7d44d969 store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:17:01,680 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#B#compaction#373 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:01,680 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/cdc8b9d838b146f6bd9c9d0e0cee7925 is 50, key is test_row_0/B:col10/1730999819579/Put/seqid=0 2024-11-07T17:17:01,686 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024110708eaf2068f7443e2b8ef519d6430edf1_73d52d69bc80a97d9d4aef7a7d44d969, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:17:01,686 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110708eaf2068f7443e2b8ef519d6430edf1_73d52d69bc80a97d9d4aef7a7d44d969 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:17:01,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742275_1451 (size=13153) 2024-11-07T17:17:01,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742276_1452 (size=4469) 2024-11-07T17:17:01,747 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73d52d69bc80a97d9d4aef7a7d44d969#A#compaction#372 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:01,747 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/b549207d9cef419286812e8c6273de81 is 175, key is test_row_0/A:col10/1730999819579/Put/seqid=0 2024-11-07T17:17:01,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742277_1453 (size=32107) 2024-11-07T17:17:02,099 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/cdc8b9d838b146f6bd9c9d0e0cee7925 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/cdc8b9d838b146f6bd9c9d0e0cee7925 2024-11-07T17:17:02,107 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/B of 73d52d69bc80a97d9d4aef7a7d44d969 into cdc8b9d838b146f6bd9c9d0e0cee7925(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:02,107 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:17:02,107 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/B, priority=13, startTime=1730999821667; duration=0sec 2024-11-07T17:17:02,107 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:02,107 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:B 2024-11-07T17:17:02,107 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:02,109 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:02,110 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 73d52d69bc80a97d9d4aef7a7d44d969/C is initiating minor compaction (all files) 2024-11-07T17:17:02,110 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73d52d69bc80a97d9d4aef7a7d44d969/C in TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:17:02,110 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/acdce5bf56cc4a1eba47aea4bebac0f3, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3a4a4f7151c4496abb771a1cef7215ec, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/8d00cd27506942cb84478b63f89ddf93] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp, totalSize=34.4 K 2024-11-07T17:17:02,112 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting acdce5bf56cc4a1eba47aea4bebac0f3, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1730999817852 2024-11-07T17:17:02,112 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a4a4f7151c4496abb771a1cef7215ec, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1730999818042 2024-11-07T17:17:02,113 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d00cd27506942cb84478b63f89ddf93, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1730999819481 2024-11-07T17:17:02,121 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
73d52d69bc80a97d9d4aef7a7d44d969#C#compaction#374 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:02,121 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/4be24bc4f02d41478deed76a819ab4e9 is 50, key is test_row_0/C:col10/1730999819579/Put/seqid=0 2024-11-07T17:17:02,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742278_1454 (size=13153) 2024-11-07T17:17:02,159 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/b549207d9cef419286812e8c6273de81 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/b549207d9cef419286812e8c6273de81 2024-11-07T17:17:02,165 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/A of 73d52d69bc80a97d9d4aef7a7d44d969 into b549207d9cef419286812e8c6273de81(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:02,165 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:17:02,165 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/A, priority=13, startTime=1730999821664; duration=0sec 2024-11-07T17:17:02,165 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:02,165 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:A 2024-11-07T17:17:02,536 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/4be24bc4f02d41478deed76a819ab4e9 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/4be24bc4f02d41478deed76a819ab4e9 2024-11-07T17:17:02,540 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73d52d69bc80a97d9d4aef7a7d44d969/C of 73d52d69bc80a97d9d4aef7a7d44d969 into 4be24bc4f02d41478deed76a819ab4e9(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:02,540 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73d52d69bc80a97d9d4aef7a7d44d969:
2024-11-07T17:17:02,540 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969., storeName=73d52d69bc80a97d9d4aef7a7d44d969/C, priority=13, startTime=1730999821667; duration=0sec
2024-11-07T17:17:02,541 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T17:17:02,541 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73d52d69bc80a97d9d4aef7a7d44d969:C
2024-11-07T17:17:07,022 ERROR [LeaseRenewer:jenkins@localhost:39903 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:39903,5,PEWorkerGroup] died
java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null
    at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T17:17:11,142 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-07T17:17:11,270 DEBUG [Thread-1590 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1e247aa1 to 127.0.0.1:64938
2024-11-07T17:17:11,270 DEBUG [Thread-1590 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T17:17:11,284 DEBUG [Thread-1582 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1dc42ea6 to 127.0.0.1:64938
2024-11-07T17:17:11,284 DEBUG [Thread-1582 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test.
Writers:
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 13
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 228
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 14
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 13
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 15
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2044
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6132 rows
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2056
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6167 rows
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2047
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6141 rows
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2064
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6192 rows
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2043
2024-11-07T17:17:11,285 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6129 rows
2024-11-07T17:17:11,285 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-07T17:17:11,285 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51f7d511 to 127.0.0.1:64938
2024-11-07T17:17:11,285 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T17:17:11,287 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-07T17:17:11,287 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-07T17:17:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-07T17:17:11,291 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999831291"}]},"ts":"1730999831291"}
2024-11-07T17:17:11,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122
2024-11-07T17:17:11,292 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-07T17:17:11,294 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-07T17:17:11,295 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-07T17:17:11,296 INFO [PEWorker-1 {}]
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=73d52d69bc80a97d9d4aef7a7d44d969, UNASSIGN}] 2024-11-07T17:17:11,296 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=73d52d69bc80a97d9d4aef7a7d44d969, UNASSIGN 2024-11-07T17:17:11,297 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=73d52d69bc80a97d9d4aef7a7d44d969, regionState=CLOSING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:11,297 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T17:17:11,297 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; CloseRegionProcedure 73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:17:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-07T17:17:11,448 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:11,449 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(124): Close 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:11,449 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T17:17:11,449 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1681): Closing 73d52d69bc80a97d9d4aef7a7d44d969, disabling compactions & flushes 2024-11-07T17:17:11,449 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:17:11,449 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:17:11,449 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. after waiting 0 ms 2024-11-07T17:17:11,449 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 
2024-11-07T17:17:11,449 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(2837): Flushing 73d52d69bc80a97d9d4aef7a7d44d969 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-07T17:17:11,449 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=A 2024-11-07T17:17:11,449 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:11,449 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=B 2024-11-07T17:17:11,449 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:11,449 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73d52d69bc80a97d9d4aef7a7d44d969, store=C 2024-11-07T17:17:11,450 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:11,454 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411074c65599815b04073bbd4f4f631fd0ac2_73d52d69bc80a97d9d4aef7a7d44d969 is 50, key is test_row_0/A:col10/1730999821159/Put/seqid=0 2024-11-07T17:17:11,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742279_1455 (size=9914) 2024-11-07T17:17:11,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-07T17:17:11,858 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:11,861 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411074c65599815b04073bbd4f4f631fd0ac2_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411074c65599815b04073bbd4f4f631fd0ac2_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:11,862 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/7c1211ea8ae84d10b1221e0d3d2354d3, store: [table=TestAcidGuarantees family=A region=73d52d69bc80a97d9d4aef7a7d44d969] 2024-11-07T17:17:11,863 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/7c1211ea8ae84d10b1221e0d3d2354d3 is 175, key is test_row_0/A:col10/1730999821159/Put/seqid=0 2024-11-07T17:17:11,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742280_1456 (size=22561) 2024-11-07T17:17:11,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-07T17:17:12,266 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=368, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/7c1211ea8ae84d10b1221e0d3d2354d3 2024-11-07T17:17:12,272 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/e3ebbec231434199a792c482b624e023 is 50, key is test_row_0/B:col10/1730999821159/Put/seqid=0 2024-11-07T17:17:12,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742281_1457 (size=9857) 2024-11-07T17:17:12,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-07T17:17:12,676 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/e3ebbec231434199a792c482b624e023 2024-11-07T17:17:12,681 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/2e6f7a42d64245868592e649d4c15d8f is 50, key is test_row_0/C:col10/1730999821159/Put/seqid=0 2024-11-07T17:17:12,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742282_1458 (size=9857) 2024-11-07T17:17:13,084 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=368 (bloomFilter=true), 
to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/2e6f7a42d64245868592e649d4c15d8f 2024-11-07T17:17:13,088 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/A/7c1211ea8ae84d10b1221e0d3d2354d3 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/7c1211ea8ae84d10b1221e0d3d2354d3 2024-11-07T17:17:13,091 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/7c1211ea8ae84d10b1221e0d3d2354d3, entries=100, sequenceid=368, filesize=22.0 K 2024-11-07T17:17:13,091 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/B/e3ebbec231434199a792c482b624e023 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/e3ebbec231434199a792c482b624e023 2024-11-07T17:17:13,094 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/e3ebbec231434199a792c482b624e023, entries=100, sequenceid=368, filesize=9.6 K 2024-11-07T17:17:13,094 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/.tmp/C/2e6f7a42d64245868592e649d4c15d8f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2e6f7a42d64245868592e649d4c15d8f 2024-11-07T17:17:13,097 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2e6f7a42d64245868592e649d4c15d8f, entries=100, sequenceid=368, filesize=9.6 K 2024-11-07T17:17:13,098 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 73d52d69bc80a97d9d4aef7a7d44d969 in 1649ms, sequenceid=368, compaction requested=false 2024-11-07T17:17:13,098 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/b79380b7eb0d4faa91e5af467393cf5a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/2031ffe4c4e74ebbb585c34ab4e7219b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/775489e744cc4d778fbc7df089f0421a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9a9b13d983e846c28f0e6c191c238537, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/4e3609af8c1a4e62bcb914d5488eb2b6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/0fdc3e7e06b743e98a58946a25479406, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/4335ebf28e1248228c79a63dc8a33aa6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/c49572f4d1cf4a1cb43661f18bb30ef7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/94b547484cf54f2fb880015b433f18ce, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/17ea650777b34428a3754817f565a45e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/1b744b917c8248b893558645efc8ef50, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/cbe0725fffd0484ea453736bd158b1fd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/195e7e53a9a64b6287a0f5d4c0044549, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/2d75de595fd44740948445bd916471ff, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9cc37c8a37c44aafbc9bf506740259f1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/130c19de8ecf4308ab65c13bcc1359ab, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9ba7098337c84dd197240a1c819511fe, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/fd1a486af0b54316bee531dda46b355b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/44f4509781ac491595a5267e65b0b635, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/5acd73736d4a422f9fbd40fbdfe9b244, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/152d5fac8ffe4d6abcd272d140a829de, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/15ec1c624fa9485cb6c805f9f8bedcf0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/17faae1995b14d86a311fcde16e16f2e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/50b02fb78bd8408da6c9223c5232b7bf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/cd769ed8a4a449ce811c89ca54d741d8] to archive 2024-11-07T17:17:13,099 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T17:17:13,100 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/b79380b7eb0d4faa91e5af467393cf5a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/b79380b7eb0d4faa91e5af467393cf5a 2024-11-07T17:17:13,101 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/2031ffe4c4e74ebbb585c34ab4e7219b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/2031ffe4c4e74ebbb585c34ab4e7219b 2024-11-07T17:17:13,102 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/775489e744cc4d778fbc7df089f0421a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/775489e744cc4d778fbc7df089f0421a 2024-11-07T17:17:13,103 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9a9b13d983e846c28f0e6c191c238537 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9a9b13d983e846c28f0e6c191c238537 2024-11-07T17:17:13,104 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/4e3609af8c1a4e62bcb914d5488eb2b6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/4e3609af8c1a4e62bcb914d5488eb2b6 2024-11-07T17:17:13,105 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/0fdc3e7e06b743e98a58946a25479406 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/0fdc3e7e06b743e98a58946a25479406 2024-11-07T17:17:13,106 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/4335ebf28e1248228c79a63dc8a33aa6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/4335ebf28e1248228c79a63dc8a33aa6 2024-11-07T17:17:13,107 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/c49572f4d1cf4a1cb43661f18bb30ef7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/c49572f4d1cf4a1cb43661f18bb30ef7 2024-11-07T17:17:13,107 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/94b547484cf54f2fb880015b433f18ce to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/94b547484cf54f2fb880015b433f18ce 2024-11-07T17:17:13,108 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/17ea650777b34428a3754817f565a45e to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/17ea650777b34428a3754817f565a45e 2024-11-07T17:17:13,109 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/1b744b917c8248b893558645efc8ef50 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/1b744b917c8248b893558645efc8ef50 2024-11-07T17:17:13,110 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/cbe0725fffd0484ea453736bd158b1fd to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/cbe0725fffd0484ea453736bd158b1fd 2024-11-07T17:17:13,111 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/195e7e53a9a64b6287a0f5d4c0044549 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/195e7e53a9a64b6287a0f5d4c0044549 2024-11-07T17:17:13,112 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/2d75de595fd44740948445bd916471ff to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/2d75de595fd44740948445bd916471ff 2024-11-07T17:17:13,112 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9cc37c8a37c44aafbc9bf506740259f1 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9cc37c8a37c44aafbc9bf506740259f1 2024-11-07T17:17:13,113 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/130c19de8ecf4308ab65c13bcc1359ab to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/130c19de8ecf4308ab65c13bcc1359ab 2024-11-07T17:17:13,114 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9ba7098337c84dd197240a1c819511fe to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/9ba7098337c84dd197240a1c819511fe 2024-11-07T17:17:13,115 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/fd1a486af0b54316bee531dda46b355b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/fd1a486af0b54316bee531dda46b355b 2024-11-07T17:17:13,116 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/44f4509781ac491595a5267e65b0b635 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/44f4509781ac491595a5267e65b0b635 2024-11-07T17:17:13,117 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/5acd73736d4a422f9fbd40fbdfe9b244 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/5acd73736d4a422f9fbd40fbdfe9b244 2024-11-07T17:17:13,117 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/152d5fac8ffe4d6abcd272d140a829de to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/152d5fac8ffe4d6abcd272d140a829de 2024-11-07T17:17:13,118 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/15ec1c624fa9485cb6c805f9f8bedcf0 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/15ec1c624fa9485cb6c805f9f8bedcf0 2024-11-07T17:17:13,119 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/17faae1995b14d86a311fcde16e16f2e to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/17faae1995b14d86a311fcde16e16f2e 2024-11-07T17:17:13,120 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/50b02fb78bd8408da6c9223c5232b7bf to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/50b02fb78bd8408da6c9223c5232b7bf 2024-11-07T17:17:13,121 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/cd769ed8a4a449ce811c89ca54d741d8 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/cd769ed8a4a449ce811c89ca54d741d8 2024-11-07T17:17:13,122 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/93a5832c58834a80b2be0203073b1038, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/e4f10f096ebc4251ac7221b7f4a0c797, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2aca49c2889e4bf895115c6be1f892fd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/da8183934fc042a689f226e86e9d1698, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/9d6b3d49f305447fab34b49a3934a360, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2f6d274634b542ddae9a9c4ef413e99a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/6b7e8dc5ff1b49cab412a3fac1e21d93, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/f07ae36da3e74665939449e1b65f724f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/d51ac716ebf84c4ba75ce677d214b6ff, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/dc147662f6ab4e809cc3719fbc70d4ef, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/a1b3f0017fb848b18027dd946c3e0204, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/413cdb9cf695419999dde4908aff8be8, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/46326101af214751b036dadd8bd0d9ac, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/8d952034f9fa4e3b98808ad051ea3c4a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/bee6d76922b1418e86d2e34da0b7e512, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/a3902fa7ec38480083f614e240938497, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/9f73a7d2c0c84562bab8965e7897e8d7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/c8512bf27b744e00bd2da8296d047562, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/85af3b1e81ec40a09c710262746e9778, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ba79ac6f28d545b79ebf8287942c2875, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2e8ae0b0f7674a79aec4d48d63798091, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/be01ba2b216f4779b77d6e97e2622837, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/6351b97ffa30420ea26f0445191c0131, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ea00e0aa44e84651a21241cf3780a1d0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ac51f2d770394540b11013ed754cfcc7] to archive 2024-11-07T17:17:13,123 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T17:17:13,124 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/93a5832c58834a80b2be0203073b1038 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/93a5832c58834a80b2be0203073b1038 2024-11-07T17:17:13,125 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/e4f10f096ebc4251ac7221b7f4a0c797 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/e4f10f096ebc4251ac7221b7f4a0c797 2024-11-07T17:17:13,126 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2aca49c2889e4bf895115c6be1f892fd to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2aca49c2889e4bf895115c6be1f892fd 2024-11-07T17:17:13,126 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/da8183934fc042a689f226e86e9d1698 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/da8183934fc042a689f226e86e9d1698 2024-11-07T17:17:13,127 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/9d6b3d49f305447fab34b49a3934a360 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/9d6b3d49f305447fab34b49a3934a360 2024-11-07T17:17:13,128 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2f6d274634b542ddae9a9c4ef413e99a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2f6d274634b542ddae9a9c4ef413e99a 2024-11-07T17:17:13,129 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/6b7e8dc5ff1b49cab412a3fac1e21d93 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/6b7e8dc5ff1b49cab412a3fac1e21d93 2024-11-07T17:17:13,130 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/f07ae36da3e74665939449e1b65f724f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/f07ae36da3e74665939449e1b65f724f 2024-11-07T17:17:13,130 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/d51ac716ebf84c4ba75ce677d214b6ff to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/d51ac716ebf84c4ba75ce677d214b6ff 2024-11-07T17:17:13,131 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/dc147662f6ab4e809cc3719fbc70d4ef to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/dc147662f6ab4e809cc3719fbc70d4ef 2024-11-07T17:17:13,132 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/a1b3f0017fb848b18027dd946c3e0204 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/a1b3f0017fb848b18027dd946c3e0204 2024-11-07T17:17:13,133 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/413cdb9cf695419999dde4908aff8be8 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/413cdb9cf695419999dde4908aff8be8 2024-11-07T17:17:13,134 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/46326101af214751b036dadd8bd0d9ac to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/46326101af214751b036dadd8bd0d9ac 2024-11-07T17:17:13,134 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/8d952034f9fa4e3b98808ad051ea3c4a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/8d952034f9fa4e3b98808ad051ea3c4a 2024-11-07T17:17:13,135 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/bee6d76922b1418e86d2e34da0b7e512 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/bee6d76922b1418e86d2e34da0b7e512 2024-11-07T17:17:13,136 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/a3902fa7ec38480083f614e240938497 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/a3902fa7ec38480083f614e240938497 2024-11-07T17:17:13,137 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/9f73a7d2c0c84562bab8965e7897e8d7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/9f73a7d2c0c84562bab8965e7897e8d7 2024-11-07T17:17:13,137 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/c8512bf27b744e00bd2da8296d047562 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/c8512bf27b744e00bd2da8296d047562 2024-11-07T17:17:13,138 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/85af3b1e81ec40a09c710262746e9778 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/85af3b1e81ec40a09c710262746e9778 2024-11-07T17:17:13,139 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ba79ac6f28d545b79ebf8287942c2875 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ba79ac6f28d545b79ebf8287942c2875 2024-11-07T17:17:13,140 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2e8ae0b0f7674a79aec4d48d63798091 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/2e8ae0b0f7674a79aec4d48d63798091 2024-11-07T17:17:13,141 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/be01ba2b216f4779b77d6e97e2622837 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/be01ba2b216f4779b77d6e97e2622837 2024-11-07T17:17:13,141 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/6351b97ffa30420ea26f0445191c0131 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/6351b97ffa30420ea26f0445191c0131 2024-11-07T17:17:13,142 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ea00e0aa44e84651a21241cf3780a1d0 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ea00e0aa44e84651a21241cf3780a1d0 2024-11-07T17:17:13,143 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ac51f2d770394540b11013ed754cfcc7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/ac51f2d770394540b11013ed754cfcc7 2024-11-07T17:17:13,144 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/771cbbc17a54455994b6107c009f8d39, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3d10eea8a81b4653809c036354f3eeae, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/f0dd39b9ee9c4c3b91395dc6b068d5d7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/1315452bef6c4c659cd591daaaecc05f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c72e6aa3f470469a88a00fb7c7e48754, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/0519cadcc0f641f99888447ac74b7db0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/ca32ee4e847a45f8a0ac29675c9e47aa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/e81ff8f3028e4d259d45314079b58474, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/d1af0a67a08f4b9d81261f556950a386, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/4c16942abe404c0ea89a6b5dbe8832be, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/50f55d1b5f374ec488f9e301f1197fcc, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c1d97eb430b6407ab7cc924234c29510, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3bf941197a4f494b910fe70af537b346, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c8a8c129966b42589b68e68b980dea5b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/398218e6a22e4faab9e861bdad770f47, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/39b074b4c4a54f39883c287ff5bbd544, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3cc7077826904aeea05c865d93155960, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/a74cdc15e9f84e2ebbc44c7ebc43b767, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/e78f4e4304874b96ae5584667ab4a1f7, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2a700339086a40ba818dcf828d4d70e2, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/df1018e0d3594d1485fc092811640f2a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/acdce5bf56cc4a1eba47aea4bebac0f3, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2d98a32b1e384d26aa63dc31cd059f1f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3a4a4f7151c4496abb771a1cef7215ec, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/8d00cd27506942cb84478b63f89ddf93] to archive 2024-11-07T17:17:13,145 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T17:17:13,146 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/771cbbc17a54455994b6107c009f8d39 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/771cbbc17a54455994b6107c009f8d39 2024-11-07T17:17:13,146 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3d10eea8a81b4653809c036354f3eeae to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3d10eea8a81b4653809c036354f3eeae 2024-11-07T17:17:13,147 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/f0dd39b9ee9c4c3b91395dc6b068d5d7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/f0dd39b9ee9c4c3b91395dc6b068d5d7 2024-11-07T17:17:13,148 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/1315452bef6c4c659cd591daaaecc05f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/1315452bef6c4c659cd591daaaecc05f 2024-11-07T17:17:13,149 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c72e6aa3f470469a88a00fb7c7e48754 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c72e6aa3f470469a88a00fb7c7e48754 2024-11-07T17:17:13,149 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/0519cadcc0f641f99888447ac74b7db0 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/0519cadcc0f641f99888447ac74b7db0 2024-11-07T17:17:13,150 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/ca32ee4e847a45f8a0ac29675c9e47aa to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/ca32ee4e847a45f8a0ac29675c9e47aa 2024-11-07T17:17:13,151 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/e81ff8f3028e4d259d45314079b58474 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/e81ff8f3028e4d259d45314079b58474 2024-11-07T17:17:13,152 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/d1af0a67a08f4b9d81261f556950a386 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/d1af0a67a08f4b9d81261f556950a386 2024-11-07T17:17:13,153 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/4c16942abe404c0ea89a6b5dbe8832be to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/4c16942abe404c0ea89a6b5dbe8832be 2024-11-07T17:17:13,153 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/50f55d1b5f374ec488f9e301f1197fcc to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/50f55d1b5f374ec488f9e301f1197fcc 2024-11-07T17:17:13,154 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c1d97eb430b6407ab7cc924234c29510 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c1d97eb430b6407ab7cc924234c29510 2024-11-07T17:17:13,155 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3bf941197a4f494b910fe70af537b346 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3bf941197a4f494b910fe70af537b346 2024-11-07T17:17:13,156 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c8a8c129966b42589b68e68b980dea5b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/c8a8c129966b42589b68e68b980dea5b 2024-11-07T17:17:13,157 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/398218e6a22e4faab9e861bdad770f47 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/398218e6a22e4faab9e861bdad770f47 2024-11-07T17:17:13,157 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/39b074b4c4a54f39883c287ff5bbd544 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/39b074b4c4a54f39883c287ff5bbd544 2024-11-07T17:17:13,158 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3cc7077826904aeea05c865d93155960 to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3cc7077826904aeea05c865d93155960 2024-11-07T17:17:13,159 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/a74cdc15e9f84e2ebbc44c7ebc43b767 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/a74cdc15e9f84e2ebbc44c7ebc43b767 2024-11-07T17:17:13,160 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/e78f4e4304874b96ae5584667ab4a1f7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/e78f4e4304874b96ae5584667ab4a1f7 2024-11-07T17:17:13,160 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2a700339086a40ba818dcf828d4d70e2 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2a700339086a40ba818dcf828d4d70e2 2024-11-07T17:17:13,161 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/df1018e0d3594d1485fc092811640f2a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/df1018e0d3594d1485fc092811640f2a 2024-11-07T17:17:13,162 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/acdce5bf56cc4a1eba47aea4bebac0f3 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/acdce5bf56cc4a1eba47aea4bebac0f3 2024-11-07T17:17:13,163 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2d98a32b1e384d26aa63dc31cd059f1f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2d98a32b1e384d26aa63dc31cd059f1f 2024-11-07T17:17:13,163 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3a4a4f7151c4496abb771a1cef7215ec to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/3a4a4f7151c4496abb771a1cef7215ec 2024-11-07T17:17:13,164 DEBUG [StoreCloser-TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/8d00cd27506942cb84478b63f89ddf93 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/8d00cd27506942cb84478b63f89ddf93 2024-11-07T17:17:13,167 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/recovered.edits/371.seqid, newMaxSeqId=371, maxSeqId=4 2024-11-07T17:17:13,168 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969. 2024-11-07T17:17:13,168 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1635): Region close journal for 73d52d69bc80a97d9d4aef7a7d44d969: 2024-11-07T17:17:13,169 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(170): Closed 73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,169 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=73d52d69bc80a97d9d4aef7a7d44d969, regionState=CLOSED 2024-11-07T17:17:13,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-07T17:17:13,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseRegionProcedure 73d52d69bc80a97d9d4aef7a7d44d969, server=3a0fde618c86,37403,1730999712734 in 1.8730 sec 2024-11-07T17:17:13,172 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=124, resume processing ppid=123 2024-11-07T17:17:13,172 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, ppid=123, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=73d52d69bc80a97d9d4aef7a7d44d969, UNASSIGN in 1.8750 sec 2024-11-07T17:17:13,174 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-07T17:17:13,174 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8780 sec 2024-11-07T17:17:13,175 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999833175"}]},"ts":"1730999833175"} 
2024-11-07T17:17:13,175 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-07T17:17:13,178 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-07T17:17:13,179 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8910 sec 2024-11-07T17:17:13,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-07T17:17:13,395 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-07T17:17:13,395 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-07T17:17:13,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:13,396 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:13,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-07T17:17:13,397 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=126, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:13,399 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,401 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/recovered.edits] 2024-11-07T17:17:13,403 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/7c1211ea8ae84d10b1221e0d3d2354d3 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/7c1211ea8ae84d10b1221e0d3d2354d3 2024-11-07T17:17:13,403 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/b549207d9cef419286812e8c6273de81 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/A/b549207d9cef419286812e8c6273de81 2024-11-07T17:17:13,405 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/cdc8b9d838b146f6bd9c9d0e0cee7925 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/cdc8b9d838b146f6bd9c9d0e0cee7925 2024-11-07T17:17:13,406 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/e3ebbec231434199a792c482b624e023 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/B/e3ebbec231434199a792c482b624e023 2024-11-07T17:17:13,408 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2e6f7a42d64245868592e649d4c15d8f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/2e6f7a42d64245868592e649d4c15d8f 2024-11-07T17:17:13,409 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/4be24bc4f02d41478deed76a819ab4e9 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/C/4be24bc4f02d41478deed76a819ab4e9 2024-11-07T17:17:13,411 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/recovered.edits/371.seqid to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969/recovered.edits/371.seqid 2024-11-07T17:17:13,411 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,411 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-07T17:17:13,412 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-07T17:17:13,412 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-07T17:17:13,415 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110707a4ad020d874ceb8dc7e5318a6f809d_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110707a4ad020d874ceb8dc7e5318a6f809d_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,416 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411071fca64a58bec419abb65d07b334552a9_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411071fca64a58bec419abb65d07b334552a9_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,417 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072758d47d33474869b730915b87b883ae_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072758d47d33474869b730915b87b883ae_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,418 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107287b6d6d10ef4b2c9da357b03425ca28_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107287b6d6d10ef4b2c9da357b03425ca28_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,419 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072b70b05f6dbd49b0b704a25a291b0084_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072b70b05f6dbd49b0b704a25a291b0084_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,420 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072e3738c32cef45c7a00e1aa5b42d5de5_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072e3738c32cef45c7a00e1aa5b42d5de5_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,421 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411073f75a1946baa42d5bce3e121668b0262_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411073f75a1946baa42d5bce3e121668b0262_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,422 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107472db18b368e49d296059f2ad071f859_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107472db18b368e49d296059f2ad071f859_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,423 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411074c65599815b04073bbd4f4f631fd0ac2_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411074c65599815b04073bbd4f4f631fd0ac2_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,424 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107678ccf16dcf04c049c9ccf2b6a69db76_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107678ccf16dcf04c049c9ccf2b6a69db76_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,425 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078b68d5d3360a469f89ca85c765836fec_73d52d69bc80a97d9d4aef7a7d44d969 to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078b68d5d3360a469f89ca85c765836fec_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,426 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411079bdc6461900f4f959f51c589046b43da_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411079bdc6461900f4f959f51c589046b43da_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,427 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107a20c7162ad05413bace6c131fc93858c_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107a20c7162ad05413bace6c131fc93858c_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,428 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c5b61b92b3174e2293629db29c6fc247_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c5b61b92b3174e2293629db29c6fc247_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,429 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c839d99ac76d4ec9bf653076334f5cc7_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c839d99ac76d4ec9bf653076334f5cc7_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,430 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c892cdbe2d644e7089986a41989e362a_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c892cdbe2d644e7089986a41989e362a_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,431 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ed4cd1c770f64dd0b41dbbe52cb4dd50_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ed4cd1c770f64dd0b41dbbe52cb4dd50_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,432 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107eeac75e517af4864bd0c28a3ec41ca43_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107eeac75e517af4864bd0c28a3ec41ca43_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,433 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fa529185c784427db29789bc258cf4a6_73d52d69bc80a97d9d4aef7a7d44d969 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fa529185c784427db29789bc258cf4a6_73d52d69bc80a97d9d4aef7a7d44d969 2024-11-07T17:17:13,433 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-07T17:17:13,435 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=126, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:13,437 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-07T17:17:13,438 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-07T17:17:13,439 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=126, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:13,439 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-07T17:17:13,439 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730999833439"}]},"ts":"9223372036854775807"} 2024-11-07T17:17:13,441 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-07T17:17:13,441 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 73d52d69bc80a97d9d4aef7a7d44d969, NAME => 'TestAcidGuarantees,,1730999797000.73d52d69bc80a97d9d4aef7a7d44d969.', STARTKEY => '', ENDKEY => ''}] 2024-11-07T17:17:13,441 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-07T17:17:13,441 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730999833441"}]},"ts":"9223372036854775807"} 2024-11-07T17:17:13,442 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-07T17:17:13,444 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=126, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:13,445 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 49 msec 2024-11-07T17:17:13,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-07T17:17:13,498 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-07T17:17:13,507 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=235 (was 238), OpenFileDescriptor=449 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=537 (was 473) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2713 (was 2761) 2024-11-07T17:17:13,515 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=235, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=537, ProcessCount=11, AvailableMemoryMB=2713 2024-11-07T17:17:13,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-07T17:17:13,516 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T17:17:13,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:13,517 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T17:17:13,518 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:13,518 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 127 2024-11-07T17:17:13,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-07T17:17:13,518 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T17:17:13,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742283_1459 (size=960) 2024-11-07T17:17:13,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-07T17:17:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-07T17:17:13,925 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17 2024-11-07T17:17:13,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742284_1460 (size=53) 2024-11-07T17:17:14,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-07T17:17:14,330 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:17:14,330 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 1711199603e16c41a1a94c45a03f0bd8, disabling compactions & flushes 2024-11-07T17:17:14,330 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:14,331 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:14,331 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. after waiting 0 ms 2024-11-07T17:17:14,331 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:14,331 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
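The create request logged above asks for three column families A, B and C, each keeping a single version, plus the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC' that later makes each store use a CompactingMemStore. A hedged sketch of an equivalent create issued through the Java Admin API (the Connection handling is illustrative, not the test's own code):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateCompactingTableSketch {
      static void createTable(Connection conn) throws IOException {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Same table-level metadata the master logs above: BASIC in-memory compaction.
                .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
        for (String family : new String[] {"A", "B", "C"}) {
          builder.setColumnFamily(
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                  .setMaxVersions(1)                   // VERSIONS => '1' in the logged descriptor
                  .build());
        }
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(builder.build());          // blocks until CreateTableProcedure finishes
        }
      }
    }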
2024-11-07T17:17:14,331 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:14,331 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T17:17:14,332 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1730999834331"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730999834331"}]},"ts":"1730999834331"} 2024-11-07T17:17:14,333 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-07T17:17:14,333 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T17:17:14,333 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999834333"}]},"ts":"1730999834333"} 2024-11-07T17:17:14,334 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-07T17:17:14,337 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1711199603e16c41a1a94c45a03f0bd8, ASSIGN}] 2024-11-07T17:17:14,338 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1711199603e16c41a1a94c45a03f0bd8, ASSIGN 2024-11-07T17:17:14,338 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=1711199603e16c41a1a94c45a03f0bd8, ASSIGN; state=OFFLINE, location=3a0fde618c86,37403,1730999712734; forceNewPlan=false, retain=false 2024-11-07T17:17:14,489 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=1711199603e16c41a1a94c45a03f0bd8, regionState=OPENING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:14,490 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; OpenRegionProcedure 1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:17:14,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-07T17:17:14,641 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:14,644 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
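The interleaved "Checking to see if procedure is done pid=127" lines are the admin client polling the master while the CreateTableProcedure and its ASSIGN/OpenRegionProcedure children (pid=128, pid=129) run. A rough client-side sketch, assuming the asynchronous Admin API rather than the test's own helper code:

    import java.util.concurrent.Future;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class CreateTableWaitSketch {
      static void createAndWait(Admin admin, TableDescriptor desc) throws Exception {
        // The master stores the procedure (here pid=127) and the returned future keeps
        // asking MasterRpcServices whether it is done, which is the polling seen above.
        Future<Void> pending = admin.createTableAsync(desc);
        pending.get();
      }
    }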
2024-11-07T17:17:14,644 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7285): Opening region: {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} 2024-11-07T17:17:14,644 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:14,644 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:17:14,644 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7327): checking encryption for 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:14,644 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7330): checking classloading for 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:14,645 INFO [StoreOpener-1711199603e16c41a1a94c45a03f0bd8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:14,646 INFO [StoreOpener-1711199603e16c41a1a94c45a03f0bd8-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:17:14,647 INFO [StoreOpener-1711199603e16c41a1a94c45a03f0bd8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1711199603e16c41a1a94c45a03f0bd8 columnFamilyName A 2024-11-07T17:17:14,647 DEBUG [StoreOpener-1711199603e16c41a1a94c45a03f0bd8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:14,647 INFO [StoreOpener-1711199603e16c41a1a94c45a03f0bd8-1 {}] regionserver.HStore(327): Store=1711199603e16c41a1a94c45a03f0bd8/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:17:14,647 INFO [StoreOpener-1711199603e16c41a1a94c45a03f0bd8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:14,648 INFO [StoreOpener-1711199603e16c41a1a94c45a03f0bd8-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:17:14,648 INFO [StoreOpener-1711199603e16c41a1a94c45a03f0bd8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1711199603e16c41a1a94c45a03f0bd8 columnFamilyName B 2024-11-07T17:17:14,648 DEBUG [StoreOpener-1711199603e16c41a1a94c45a03f0bd8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:14,649 INFO [StoreOpener-1711199603e16c41a1a94c45a03f0bd8-1 {}] regionserver.HStore(327): Store=1711199603e16c41a1a94c45a03f0bd8/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:17:14,649 INFO [StoreOpener-1711199603e16c41a1a94c45a03f0bd8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:14,650 INFO [StoreOpener-1711199603e16c41a1a94c45a03f0bd8-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:17:14,650 INFO [StoreOpener-1711199603e16c41a1a94c45a03f0bd8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1711199603e16c41a1a94c45a03f0bd8 columnFamilyName C 2024-11-07T17:17:14,650 DEBUG [StoreOpener-1711199603e16c41a1a94c45a03f0bd8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:14,650 INFO [StoreOpener-1711199603e16c41a1a94c45a03f0bd8-1 {}] regionserver.HStore(327): Store=1711199603e16c41a1a94c45a03f0bd8/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:17:14,650 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:14,651 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:14,651 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:14,652 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T17:17:14,653 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1085): writing seq id for 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:14,655 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T17:17:14,655 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1102): Opened 1711199603e16c41a1a94c45a03f0bd8; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64144493, jitterRate=-0.04417257010936737}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T17:17:14,655 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1001): Region open journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:14,656 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., pid=129, masterSystemTime=1730999834641 2024-11-07T17:17:14,657 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:14,657 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
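The open above reports ConstantSizeRegionSplitPolicy with desiredMaxFileSize=64144493 and jitterRate≈-0.0442; that policy picks a per-region split threshold of maxFileSize * (1 + jitterRate). Assuming the max file size configured for this test is 64 MB (67108864 bytes), the logged value checks out; a worked example, not code from the test:

    public class SplitSizeJitterCheck {
      public static void main(String[] args) {
        long maxFileSize = 67108864L;                  // assumed 64 MB base setting for the test
        double jitterRate = -0.04417257010936737;      // value reported in the open log above
        long desired = maxFileSize + (long) (maxFileSize * jitterRate);
        System.out.println(desired);                   // ~64144493, the logged desiredMaxFileSize
      }
    }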
2024-11-07T17:17:14,658 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=1711199603e16c41a1a94c45a03f0bd8, regionState=OPEN, openSeqNum=2, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:14,660 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-07T17:17:14,660 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; OpenRegionProcedure 1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 in 169 msec 2024-11-07T17:17:14,661 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=128, resume processing ppid=127 2024-11-07T17:17:14,661 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, ppid=127, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1711199603e16c41a1a94c45a03f0bd8, ASSIGN in 323 msec 2024-11-07T17:17:14,662 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T17:17:14,662 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999834662"}]},"ts":"1730999834662"} 2024-11-07T17:17:14,663 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-07T17:17:14,665 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T17:17:14,666 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1490 sec 2024-11-07T17:17:15,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-07T17:17:15,622 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 127 completed 2024-11-07T17:17:15,623 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x32c12a30 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79b10416 2024-11-07T17:17:15,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7177efc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:15,628 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:15,629 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59866, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:15,630 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T17:17:15,630 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56882, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T17:17:15,632 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ef40578 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f142b04 2024-11-07T17:17:15,634 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61d38088, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:15,635 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x032bb71c to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@de9f076 2024-11-07T17:17:15,637 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7043f683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:15,638 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06bc0f7c to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4414259d 2024-11-07T17:17:15,640 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0c2472, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:15,641 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b8b6e04 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ed69825 2024-11-07T17:17:15,644 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34b30c39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:15,645 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bc486e1 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11193a0c 2024-11-07T17:17:15,647 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d672ed2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:15,648 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7861b162 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cf40102 2024-11-07T17:17:15,651 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41b0e7b6, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:15,651 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x154f0f85 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@496fe03f 2024-11-07T17:17:15,654 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f2423f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:15,655 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x008a917b to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3652e74d 2024-11-07T17:17:15,659 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@184771cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:15,659 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x054c2725 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2405c04e 2024-11-07T17:17:15,663 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76f0408, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:15,663 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79d49886 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@73d92042 2024-11-07T17:17:15,669 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c692575, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:15,672 DEBUG [hconnection-0x2da02a2f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:15,673 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59870, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:15,675 DEBUG [hconnection-0xcdcca0d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:15,676 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59874, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:15,676 DEBUG [hconnection-0x5f6d705b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:15,677 DEBUG 
[hconnection-0x60f31045-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:15,677 DEBUG [hconnection-0x6b3fada3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:15,677 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59888, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:15,677 DEBUG [hconnection-0x7a282a3e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:15,677 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59900, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:15,678 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59916, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:15,678 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59930, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:15,678 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:17:15,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-07T17:17:15,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-07T17:17:15,680 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:17:15,681 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:17:15,681 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:17:15,684 DEBUG [hconnection-0x20fb0dfe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:15,685 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59942, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:15,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:15,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T17:17:15,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, 
store=A 2024-11-07T17:17:15,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:15,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:15,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:15,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:15,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:15,697 DEBUG [hconnection-0x2aeb0800-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:15,697 DEBUG [hconnection-0x44beb800-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:15,697 DEBUG [hconnection-0x35b4cd7f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:15,698 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59976, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:15,698 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59972, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:15,698 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59956, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:15,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/92a3811c9e35457c895d2a4a9dfd4df5 is 50, key is test_row_0/A:col10/1730999835686/Put/seqid=0 2024-11-07T17:17:15,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:15,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999895708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:15,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:15,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999895708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:15,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:15,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999895709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:15,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:15,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999895709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:15,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:15,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999895710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:15,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742285_1461 (size=12001) 2024-11-07T17:17:15,739 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/92a3811c9e35457c895d2a4a9dfd4df5 2024-11-07T17:17:15,767 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/478d5dd5a81843be877947d981f13212 is 50, key is test_row_0/B:col10/1730999835686/Put/seqid=0 2024-11-07T17:17:15,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-07T17:17:15,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742286_1462 (size=12001) 2024-11-07T17:17:15,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/478d5dd5a81843be877947d981f13212 2024-11-07T17:17:15,806 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/e0c5620d204149ca8eb3d57fdca4e000 is 50, key is test_row_0/C:col10/1730999835686/Put/seqid=0 2024-11-07T17:17:15,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742287_1463 (size=12001) 2024-11-07T17:17:15,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:15,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999895817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:15,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:15,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999895817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:15,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:15,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999895817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:15,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:15,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999895817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:15,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:15,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999895818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:15,832 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:15,833 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-07T17:17:15,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:15,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:15,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:15,833 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
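The RegionTooBusyException storm above is the region blocking writers once its combined memstores exceed the blocking limit of flush size * hbase.hregion.memstore.block.multiplier; with the 131072-byte flush size noted earlier and the default multiplier of 4 (an assumption, but it matches the logged figure), the limit is 524288 bytes, i.e. the "512.0 K" in every message. The "Unable to complete flush ... as already flushing" IOException for pid=131 is the remote FlushRegionProcedure declining because MemStoreFlusher.0 already has a flush of this region in flight; the master simply re-dispatches it, as the retry a little further down shows. A small worked check of the limit:

    public class MemstoreBlockingLimitCheck {
      public static void main(String[] args) {
        long flushSize = 131072L;    // hbase.hregion.memstore.flush.size from the earlier warning
        int blockMultiplier = 4;     // assumed default hbase.hregion.memstore.block.multiplier
        long blockingLimit = flushSize * blockMultiplier;
        System.out.println(blockingLimit);             // 524288 bytes = the "512.0 K" limit logged
      }
    }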
2024-11-07T17:17:15,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:15,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:15,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-07T17:17:15,985 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:15,985 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-07T17:17:15,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:15,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:15,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:15,986 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:15,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:15,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:16,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999896020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999896021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999896022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999896022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999896022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,137 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,138 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-07T17:17:16,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:16,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:16,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:16,138 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:16,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:16,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:16,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/e0c5620d204149ca8eb3d57fdca4e000 2024-11-07T17:17:16,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/92a3811c9e35457c895d2a4a9dfd4df5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/92a3811c9e35457c895d2a4a9dfd4df5 2024-11-07T17:17:16,228 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/92a3811c9e35457c895d2a4a9dfd4df5, entries=150, sequenceid=15, filesize=11.7 K 2024-11-07T17:17:16,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/478d5dd5a81843be877947d981f13212 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/478d5dd5a81843be877947d981f13212 2024-11-07T17:17:16,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/478d5dd5a81843be877947d981f13212, entries=150, sequenceid=15, filesize=11.7 K 2024-11-07T17:17:16,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/e0c5620d204149ca8eb3d57fdca4e000 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/e0c5620d204149ca8eb3d57fdca4e000 2024-11-07T17:17:16,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/e0c5620d204149ca8eb3d57fdca4e000, entries=150, sequenceid=15, filesize=11.7 K 2024-11-07T17:17:16,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 1711199603e16c41a1a94c45a03f0bd8 in 551ms, sequenceid=15, compaction requested=false 2024-11-07T17:17:16,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:16,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=130 2024-11-07T17:17:16,290 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-07T17:17:16,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:16,290 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-07T17:17:16,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:16,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:16,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:16,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:16,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:16,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:16,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/67d304c9e4fc404f8c56d9d1e64a4801 is 50, key is test_row_0/A:col10/1730999835697/Put/seqid=0 2024-11-07T17:17:16,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742288_1464 (size=12001) 2024-11-07T17:17:16,299 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/67d304c9e4fc404f8c56d9d1e64a4801 2024-11-07T17:17:16,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/33208c211b654ff48ca4ca5f1f26c180 is 50, key is test_row_0/B:col10/1730999835697/Put/seqid=0 2024-11-07T17:17:16,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742289_1465 (size=12001) 2024-11-07T17:17:16,311 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/33208c211b654ff48ca4ca5f1f26c180 2024-11-07T17:17:16,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/1e431eacc4af400a969403a6a33d5eb8 is 50, key is test_row_0/C:col10/1730999835697/Put/seqid=0 2024-11-07T17:17:16,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742290_1466 (size=12001) 2024-11-07T17:17:16,324 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/1e431eacc4af400a969403a6a33d5eb8 2024-11-07T17:17:16,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:16,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
as already flushing 2024-11-07T17:17:16,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/67d304c9e4fc404f8c56d9d1e64a4801 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/67d304c9e4fc404f8c56d9d1e64a4801 2024-11-07T17:17:16,338 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/67d304c9e4fc404f8c56d9d1e64a4801, entries=150, sequenceid=37, filesize=11.7 K 2024-11-07T17:17:16,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/33208c211b654ff48ca4ca5f1f26c180 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/33208c211b654ff48ca4ca5f1f26c180 2024-11-07T17:17:16,343 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/33208c211b654ff48ca4ca5f1f26c180, entries=150, sequenceid=37, filesize=11.7 K 2024-11-07T17:17:16,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/1e431eacc4af400a969403a6a33d5eb8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/1e431eacc4af400a969403a6a33d5eb8 2024-11-07T17:17:16,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999896341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999896341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999896342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999896343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,350 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/1e431eacc4af400a969403a6a33d5eb8, entries=150, sequenceid=37, filesize=11.7 K 2024-11-07T17:17:16,350 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 1711199603e16c41a1a94c45a03f0bd8 in 60ms, sequenceid=37, compaction requested=false 2024-11-07T17:17:16,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:16,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:16,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-07T17:17:16,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-07T17:17:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:16,352 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-07T17:17:16,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:16,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:16,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:16,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:16,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:16,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:16,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-07T17:17:16,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 671 msec 2024-11-07T17:17:16,361 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 682 msec 2024-11-07T17:17:16,363 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/e8b1ad029bae41ba87a4be65ebd7ad8c is 50, key is test_row_0/A:col10/1730999836345/Put/seqid=0 2024-11-07T17:17:16,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742291_1467 (size=12001) 2024-11-07T17:17:16,370 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/e8b1ad029bae41ba87a4be65ebd7ad8c 2024-11-07T17:17:16,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/d875f57a28ff402fa513fbe2e0029a17 is 50, key is test_row_0/B:col10/1730999836345/Put/seqid=0 2024-11-07T17:17:16,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742292_1468 
(size=12001) 2024-11-07T17:17:16,384 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/d875f57a28ff402fa513fbe2e0029a17 2024-11-07T17:17:16,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/9d32b7c2ff3845ceb0e0e70a3ed9befa is 50, key is test_row_0/C:col10/1730999836345/Put/seqid=0 2024-11-07T17:17:16,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742293_1469 (size=12001) 2024-11-07T17:17:16,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999896443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999896448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999896448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999896449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999896449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999896548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999896655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999896655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999896655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999896656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999896753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-07T17:17:16,783 INFO [Thread-2062 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-07T17:17:16,784 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:17:16,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-07T17:17:16,786 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:17:16,786 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:17:16,786 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:17:16,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-07T17:17:16,796 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at 
sequenceid=52 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/9d32b7c2ff3845ceb0e0e70a3ed9befa 2024-11-07T17:17:16,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/e8b1ad029bae41ba87a4be65ebd7ad8c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e8b1ad029bae41ba87a4be65ebd7ad8c 2024-11-07T17:17:16,807 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e8b1ad029bae41ba87a4be65ebd7ad8c, entries=150, sequenceid=52, filesize=11.7 K 2024-11-07T17:17:16,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/d875f57a28ff402fa513fbe2e0029a17 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/d875f57a28ff402fa513fbe2e0029a17 2024-11-07T17:17:16,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/d875f57a28ff402fa513fbe2e0029a17, entries=150, sequenceid=52, filesize=11.7 K 2024-11-07T17:17:16,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/9d32b7c2ff3845ceb0e0e70a3ed9befa as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/9d32b7c2ff3845ceb0e0e70a3ed9befa 2024-11-07T17:17:16,820 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/9d32b7c2ff3845ceb0e0e70a3ed9befa, entries=150, sequenceid=52, filesize=11.7 K 2024-11-07T17:17:16,821 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 1711199603e16c41a1a94c45a03f0bd8 in 469ms, sequenceid=52, compaction requested=true 2024-11-07T17:17:16,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:16,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:16,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:16,821 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 
{}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:16,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:17:16,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:16,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:16,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:16,821 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:16,828 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:16,828 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/A is initiating minor compaction (all files) 2024-11-07T17:17:16,828 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/A in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:16,828 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/92a3811c9e35457c895d2a4a9dfd4df5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/67d304c9e4fc404f8c56d9d1e64a4801, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e8b1ad029bae41ba87a4be65ebd7ad8c] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=35.2 K 2024-11-07T17:17:16,828 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:16,828 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92a3811c9e35457c895d2a4a9dfd4df5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1730999835683 2024-11-07T17:17:16,828 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/B is initiating minor compaction (all files) 2024-11-07T17:17:16,828 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/B in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:16,829 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/478d5dd5a81843be877947d981f13212, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/33208c211b654ff48ca4ca5f1f26c180, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/d875f57a28ff402fa513fbe2e0029a17] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=35.2 K 2024-11-07T17:17:16,829 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67d304c9e4fc404f8c56d9d1e64a4801, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1730999835697 2024-11-07T17:17:16,829 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 478d5dd5a81843be877947d981f13212, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1730999835683 2024-11-07T17:17:16,830 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8b1ad029bae41ba87a4be65ebd7ad8c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1730999836341 2024-11-07T17:17:16,830 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 33208c211b654ff48ca4ca5f1f26c180, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1730999835697 2024-11-07T17:17:16,830 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting d875f57a28ff402fa513fbe2e0029a17, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1730999836341 2024-11-07T17:17:16,838 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#B#compaction#387 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:16,838 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#A#compaction#388 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:16,838 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/c40aa37913b74728ac6b733cef9acaa5 is 50, key is test_row_0/A:col10/1730999836345/Put/seqid=0 2024-11-07T17:17:16,838 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/14839db64b1d4710b00c798aad8cd655 is 50, key is test_row_0/B:col10/1730999836345/Put/seqid=0 2024-11-07T17:17:16,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742294_1470 (size=12104) 2024-11-07T17:17:16,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742295_1471 (size=12104) 2024-11-07T17:17:16,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-07T17:17:16,938 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-07T17:17:16,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:16,939 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-07T17:17:16,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:16,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:16,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:16,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:16,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:16,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:16,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/d5b1dae73b82402e8cbd61052e6a80eb is 50, key is test_row_0/A:col10/1730999836441/Put/seqid=0 2024-11-07T17:17:16,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:16,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:16,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742296_1472 (size=12001) 2024-11-07T17:17:16,981 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/d5b1dae73b82402e8cbd61052e6a80eb 2024-11-07T17:17:16,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999896982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999896983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999896987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:16,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:16,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999896988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:17,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/fbd8f591bb5b4ec3b25d2d4a77cae75f is 50, key is test_row_0/B:col10/1730999836441/Put/seqid=0 2024-11-07T17:17:17,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742297_1473 (size=12001) 2024-11-07T17:17:17,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:17,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999897056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:17,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-07T17:17:17,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:17,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999897089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:17,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:17,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999897093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:17,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:17,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999897099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:17,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:17,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999897099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:17,256 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/c40aa37913b74728ac6b733cef9acaa5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c40aa37913b74728ac6b733cef9acaa5 2024-11-07T17:17:17,258 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/14839db64b1d4710b00c798aad8cd655 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/14839db64b1d4710b00c798aad8cd655 2024-11-07T17:17:17,263 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/A of 1711199603e16c41a1a94c45a03f0bd8 into c40aa37913b74728ac6b733cef9acaa5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:17,263 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:17,263 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/A, priority=13, startTime=1730999836821; duration=0sec 2024-11-07T17:17:17,264 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:17,264 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:A 2024-11-07T17:17:17,264 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:17,265 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:17,265 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/C is initiating minor compaction (all files) 2024-11-07T17:17:17,265 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/C in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:17,265 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/e0c5620d204149ca8eb3d57fdca4e000, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/1e431eacc4af400a969403a6a33d5eb8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/9d32b7c2ff3845ceb0e0e70a3ed9befa] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=35.2 K 2024-11-07T17:17:17,267 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0c5620d204149ca8eb3d57fdca4e000, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1730999835683 2024-11-07T17:17:17,267 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e431eacc4af400a969403a6a33d5eb8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1730999835697 2024-11-07T17:17:17,268 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d32b7c2ff3845ceb0e0e70a3ed9befa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1730999836341 2024-11-07T17:17:17,276 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction 
of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/B of 1711199603e16c41a1a94c45a03f0bd8 into 14839db64b1d4710b00c798aad8cd655(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:17,276 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:17,276 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/B, priority=13, startTime=1730999836821; duration=0sec 2024-11-07T17:17:17,276 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:17,276 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:B 2024-11-07T17:17:17,279 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#C#compaction#391 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:17,279 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/39a9c95c7984497ca1949f033851468d is 50, key is test_row_0/C:col10/1730999836345/Put/seqid=0 2024-11-07T17:17:17,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:17,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999897298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:17,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:17,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999897300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:17,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742298_1474 (size=12104) 2024-11-07T17:17:17,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:17,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999897304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:17,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:17,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999897305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:17,312 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/39a9c95c7984497ca1949f033851468d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/39a9c95c7984497ca1949f033851468d 2024-11-07T17:17:17,317 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/C of 1711199603e16c41a1a94c45a03f0bd8 into 39a9c95c7984497ca1949f033851468d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:17,317 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:17,317 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/C, priority=13, startTime=1730999836821; duration=0sec 2024-11-07T17:17:17,317 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:17,317 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:C 2024-11-07T17:17:17,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-07T17:17:17,415 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/fbd8f591bb5b4ec3b25d2d4a77cae75f 2024-11-07T17:17:17,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/d5959533c3e143748842f992ae74d927 is 50, key is test_row_0/C:col10/1730999836441/Put/seqid=0 2024-11-07T17:17:17,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742299_1475 (size=12001) 2024-11-07T17:17:17,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:17,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999897562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:17,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:17,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999897605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:17,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:17,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999897606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:17,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:17,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999897613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:17,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:17,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999897614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:17,837 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/d5959533c3e143748842f992ae74d927 2024-11-07T17:17:17,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/d5b1dae73b82402e8cbd61052e6a80eb as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/d5b1dae73b82402e8cbd61052e6a80eb 2024-11-07T17:17:17,848 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/d5b1dae73b82402e8cbd61052e6a80eb, entries=150, sequenceid=73, filesize=11.7 K 2024-11-07T17:17:17,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/fbd8f591bb5b4ec3b25d2d4a77cae75f as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/fbd8f591bb5b4ec3b25d2d4a77cae75f 2024-11-07T17:17:17,855 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/fbd8f591bb5b4ec3b25d2d4a77cae75f, entries=150, sequenceid=73, filesize=11.7 K 2024-11-07T17:17:17,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/d5959533c3e143748842f992ae74d927 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d5959533c3e143748842f992ae74d927 2024-11-07T17:17:17,861 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d5959533c3e143748842f992ae74d927, entries=150, sequenceid=73, filesize=11.7 K 2024-11-07T17:17:17,861 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 1711199603e16c41a1a94c45a03f0bd8 in 922ms, sequenceid=73, compaction requested=false 2024-11-07T17:17:17,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:17,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:17,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-07T17:17:17,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-07T17:17:17,866 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-07T17:17:17,866 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0780 sec 2024-11-07T17:17:17,869 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.0820 sec 2024-11-07T17:17:17,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-07T17:17:17,891 INFO [Thread-2062 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-07T17:17:17,893 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:17:17,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-07T17:17:17,896 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:17:17,898 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:17:17,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:17:17,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-07T17:17:18,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-07T17:17:18,051 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-07T17:17:18,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:18,052 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-07T17:17:18,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:18,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:18,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:18,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:18,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:18,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:18,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/b601482b6176432882ee58487636d728 is 50, key is test_row_0/A:col10/1730999836986/Put/seqid=0 2024-11-07T17:17:18,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742300_1476 (size=9657) 2024-11-07T17:17:18,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:18,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:18,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999898154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999898155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999898157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999898158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-07T17:17:18,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999898264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999898264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999898266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999898267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999898471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999898471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999898473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999898474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,499 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/b601482b6176432882ee58487636d728 2024-11-07T17:17:18,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-07T17:17:18,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/145c1a8ec862405b9d61c0e0e1812baf is 50, key is test_row_0/B:col10/1730999836986/Put/seqid=0 2024-11-07T17:17:18,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742301_1477 (size=9657) 2024-11-07T17:17:18,536 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/145c1a8ec862405b9d61c0e0e1812baf 2024-11-07T17:17:18,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/b277c85dcdf6453dba063dce4389ef33 is 50, key is test_row_0/C:col10/1730999836986/Put/seqid=0 2024-11-07T17:17:18,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999898572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742302_1478 (size=9657) 2024-11-07T17:17:18,706 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-07T17:17:18,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999898776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999898777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999898778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:18,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999898779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:18,982 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/b277c85dcdf6453dba063dce4389ef33 2024-11-07T17:17:18,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/b601482b6176432882ee58487636d728 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/b601482b6176432882ee58487636d728 2024-11-07T17:17:18,991 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/b601482b6176432882ee58487636d728, entries=100, sequenceid=91, filesize=9.4 K 2024-11-07T17:17:18,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/145c1a8ec862405b9d61c0e0e1812baf as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/145c1a8ec862405b9d61c0e0e1812baf 2024-11-07T17:17:18,995 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/145c1a8ec862405b9d61c0e0e1812baf, entries=100, sequenceid=91, filesize=9.4 K 2024-11-07T17:17:19,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-07T17:17:19,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/b277c85dcdf6453dba063dce4389ef33 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/b277c85dcdf6453dba063dce4389ef33 2024-11-07T17:17:19,015 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/b277c85dcdf6453dba063dce4389ef33, entries=100, sequenceid=91, filesize=9.4 K 2024-11-07T17:17:19,016 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 1711199603e16c41a1a94c45a03f0bd8 in 964ms, sequenceid=91, compaction requested=true 2024-11-07T17:17:19,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:19,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:19,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-07T17:17:19,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-07T17:17:19,018 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-07T17:17:19,018 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1190 sec 2024-11-07T17:17:19,019 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.1250 sec 2024-11-07T17:17:19,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:19,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T17:17:19,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:19,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:19,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:19,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:19,287 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:19,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:19,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/f300ce89b09f4afcb08e69edf3f01c9f is 50, key is test_row_0/A:col10/1730999838152/Put/seqid=0 2024-11-07T17:17:19,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742303_1479 (size=19021) 2024-11-07T17:17:19,302 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/f300ce89b09f4afcb08e69edf3f01c9f 2024-11-07T17:17:19,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/c2b432aeabbb4fa187da953c15702736 is 50, key is test_row_0/B:col10/1730999838152/Put/seqid=0 2024-11-07T17:17:19,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999899301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999899301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999899302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742304_1480 (size=12001) 2024-11-07T17:17:19,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999899304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,313 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/c2b432aeabbb4fa187da953c15702736 2024-11-07T17:17:19,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/74d518de429a49c7a9eca50a2507113a is 50, key is test_row_0/C:col10/1730999838152/Put/seqid=0 2024-11-07T17:17:19,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742305_1481 (size=12001) 2024-11-07T17:17:19,330 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/74d518de429a49c7a9eca50a2507113a 2024-11-07T17:17:19,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/f300ce89b09f4afcb08e69edf3f01c9f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/f300ce89b09f4afcb08e69edf3f01c9f 2024-11-07T17:17:19,344 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/f300ce89b09f4afcb08e69edf3f01c9f, entries=300, sequenceid=115, filesize=18.6 K 2024-11-07T17:17:19,345 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/c2b432aeabbb4fa187da953c15702736 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/c2b432aeabbb4fa187da953c15702736 2024-11-07T17:17:19,349 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/c2b432aeabbb4fa187da953c15702736, entries=150, sequenceid=115, filesize=11.7 K 2024-11-07T17:17:19,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/74d518de429a49c7a9eca50a2507113a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/74d518de429a49c7a9eca50a2507113a 2024-11-07T17:17:19,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/74d518de429a49c7a9eca50a2507113a, entries=150, sequenceid=115, filesize=11.7 K 2024-11-07T17:17:19,354 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 1711199603e16c41a1a94c45a03f0bd8 in 68ms, sequenceid=115, compaction requested=true 2024-11-07T17:17:19,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:19,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:19,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:19,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:17:19,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:19,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:19,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:19,354 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:17:19,354 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:17:19,356 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52783 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:17:19,356 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45763 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:17:19,356 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/A is initiating minor compaction (all files) 2024-11-07T17:17:19,356 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/B is initiating minor compaction (all files) 2024-11-07T17:17:19,356 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/A in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:19,356 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/B in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:19,356 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c40aa37913b74728ac6b733cef9acaa5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/d5b1dae73b82402e8cbd61052e6a80eb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/b601482b6176432882ee58487636d728, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/f300ce89b09f4afcb08e69edf3f01c9f] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=51.5 K 2024-11-07T17:17:19,356 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/14839db64b1d4710b00c798aad8cd655, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/fbd8f591bb5b4ec3b25d2d4a77cae75f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/145c1a8ec862405b9d61c0e0e1812baf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/c2b432aeabbb4fa187da953c15702736] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=44.7 K 2024-11-07T17:17:19,357 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting c40aa37913b74728ac6b733cef9acaa5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1730999836341 2024-11-07T17:17:19,357 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 14839db64b1d4710b00c798aad8cd655, keycount=150, bloomtype=ROW, 
size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1730999836341 2024-11-07T17:17:19,357 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5b1dae73b82402e8cbd61052e6a80eb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1730999836427 2024-11-07T17:17:19,357 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting fbd8f591bb5b4ec3b25d2d4a77cae75f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1730999836427 2024-11-07T17:17:19,357 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting b601482b6176432882ee58487636d728, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1730999836985 2024-11-07T17:17:19,357 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 145c1a8ec862405b9d61c0e0e1812baf, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1730999836985 2024-11-07T17:17:19,358 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting f300ce89b09f4afcb08e69edf3f01c9f, keycount=300, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1730999838152 2024-11-07T17:17:19,358 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting c2b432aeabbb4fa187da953c15702736, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1730999838152 2024-11-07T17:17:19,365 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#B#compaction#399 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:19,366 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#A#compaction#400 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:19,366 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/67afb6b9fd894dcf8c668f7ec7ff8567 is 50, key is test_row_0/B:col10/1730999838152/Put/seqid=0 2024-11-07T17:17:19,366 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/8ffc9ec9e1e54634a92cce5162000a09 is 50, key is test_row_0/A:col10/1730999838152/Put/seqid=0 2024-11-07T17:17:19,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742306_1482 (size=12241) 2024-11-07T17:17:19,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742307_1483 (size=12241) 2024-11-07T17:17:19,384 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/67afb6b9fd894dcf8c668f7ec7ff8567 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/67afb6b9fd894dcf8c668f7ec7ff8567 2024-11-07T17:17:19,390 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/B of 1711199603e16c41a1a94c45a03f0bd8 into 67afb6b9fd894dcf8c668f7ec7ff8567(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
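The "Exploring compaction algorithm has selected ..." and "Selecting compaction from 4 store files" entries above record the minor-compaction file selection for stores A and B. Below is a minimal, self-contained sketch of the ratio test such a selection applies; it is an illustration only, assuming the default exploring-policy ratio of 1.2, and the sizes are approximations of the ~11.8 K / 11.7 K / 9.4 K / 11.7 K store-B files reported in the log, not exact values.

import java.util.List;

// Sketch (not HBase source): a candidate selection passes the ratio check only if
// every file is no larger than ratio * (combined size of the other selected files).
public class FilesInRatioSketch {

    static boolean filesInRatio(List<Long> sizesInBytes, double ratio) {
        long total = sizesInBytes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizesInBytes) {
            if (size > (total - size) * ratio) {
                return false;   // one oversized file disqualifies the whole selection
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes of the four store-B files selected above (~44.7 K total).
        List<Long> storeB = List.of(12_077L, 12_001L, 9_626L, 12_001L);
        System.out.println("selection accepted: " + filesInRatio(storeB, 1.2));
    }
}

With these sizes no file exceeds 1.2 times the sum of the others, so the four-file selection is accepted, which is consistent with the "4 eligible" selections logged above.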
2024-11-07T17:17:19,390 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:19,390 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/B, priority=12, startTime=1730999839354; duration=0sec 2024-11-07T17:17:19,390 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:19,390 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:B 2024-11-07T17:17:19,390 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:17:19,391 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45763 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:17:19,392 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/C is initiating minor compaction (all files) 2024-11-07T17:17:19,392 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/C in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:19,392 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/39a9c95c7984497ca1949f033851468d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d5959533c3e143748842f992ae74d927, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/b277c85dcdf6453dba063dce4389ef33, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/74d518de429a49c7a9eca50a2507113a] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=44.7 K 2024-11-07T17:17:19,392 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 39a9c95c7984497ca1949f033851468d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1730999836341 2024-11-07T17:17:19,392 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting d5959533c3e143748842f992ae74d927, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1730999836427 2024-11-07T17:17:19,393 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting b277c85dcdf6453dba063dce4389ef33, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, 
compression=NONE, seqNum=91, earliestPutTs=1730999836985 2024-11-07T17:17:19,393 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 74d518de429a49c7a9eca50a2507113a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1730999838152 2024-11-07T17:17:19,400 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#C#compaction#401 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:19,401 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/0fb5a51e85bc443db71ec785d848535e is 50, key is test_row_0/C:col10/1730999838152/Put/seqid=0 2024-11-07T17:17:19,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742308_1484 (size=12241) 2024-11-07T17:17:19,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:19,417 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T17:17:19,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:19,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:19,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:19,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:19,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:19,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:19,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/882294ca131045c1af89b82f26859aa2 is 50, key is test_row_0/A:col10/1730999839417/Put/seqid=0 2024-11-07T17:17:19,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742309_1485 (size=16781) 2024-11-07T17:17:19,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/882294ca131045c1af89b82f26859aa2 2024-11-07T17:17:19,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/4f8e625ef25743cb898c76df4bfdb018 is 50, key is test_row_0/B:col10/1730999839417/Put/seqid=0 2024-11-07T17:17:19,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742310_1486 (size=12101) 2024-11-07T17:17:19,442 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/4f8e625ef25743cb898c76df4bfdb018 2024-11-07T17:17:19,451 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/2b6d8a25006f44cbaf64460cbe4a0fb5 is 50, key is test_row_0/C:col10/1730999839417/Put/seqid=0 2024-11-07T17:17:19,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999899454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999899453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999899456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999899456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742311_1487 (size=12101) 2024-11-07T17:17:19,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999899567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999899567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999899567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999899568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,782 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/8ffc9ec9e1e54634a92cce5162000a09 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/8ffc9ec9e1e54634a92cce5162000a09 2024-11-07T17:17:19,789 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/A of 1711199603e16c41a1a94c45a03f0bd8 into 8ffc9ec9e1e54634a92cce5162000a09(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:19,790 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:19,790 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/A, priority=12, startTime=1730999839354; duration=0sec 2024-11-07T17:17:19,790 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:19,790 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:A 2024-11-07T17:17:19,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999899793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999899793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999899793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:19,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999899793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:19,819 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/0fb5a51e85bc443db71ec785d848535e as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/0fb5a51e85bc443db71ec785d848535e 2024-11-07T17:17:19,823 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/C of 1711199603e16c41a1a94c45a03f0bd8 into 0fb5a51e85bc443db71ec785d848535e(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
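The repeated "RegionTooBusyException: Over memstore limit=512.0 K" rejections above are raised by HRegion.checkResources once the region's memstore exceeds its blocking limit, i.e. the configured flush size multiplied by hbase.hregion.memstore.block.multiplier. A minimal sketch of that arithmetic follows; the values are hypothetical, chosen only to reproduce the 512 K figure seen in the log, and are not this test run's actual settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values: a 128 K flush size with the default multiplier of 4
    // yields the 512 K blocking limit reported in the warnings above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;

    // checkResources rejects writes with RegionTooBusyException above this size.
    System.out.println("Writes blocked above " + blockingLimit + " bytes"); // 524288
  }
}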
2024-11-07T17:17:19,823 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:19,823 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/C, priority=12, startTime=1730999839354; duration=0sec 2024-11-07T17:17:19,823 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:19,824 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:C 2024-11-07T17:17:19,867 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/2b6d8a25006f44cbaf64460cbe4a0fb5 2024-11-07T17:17:19,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/882294ca131045c1af89b82f26859aa2 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/882294ca131045c1af89b82f26859aa2 2024-11-07T17:17:19,879 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/882294ca131045c1af89b82f26859aa2, entries=250, sequenceid=130, filesize=16.4 K 2024-11-07T17:17:19,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/4f8e625ef25743cb898c76df4bfdb018 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/4f8e625ef25743cb898c76df4bfdb018 2024-11-07T17:17:19,883 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/4f8e625ef25743cb898c76df4bfdb018, entries=150, sequenceid=130, filesize=11.8 K 2024-11-07T17:17:19,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/2b6d8a25006f44cbaf64460cbe4a0fb5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/2b6d8a25006f44cbaf64460cbe4a0fb5 2024-11-07T17:17:19,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/2b6d8a25006f44cbaf64460cbe4a0fb5, entries=150, sequenceid=130, filesize=11.8 K 2024-11-07T17:17:19,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 1711199603e16c41a1a94c45a03f0bd8 in 471ms, sequenceid=130, compaction requested=false 2024-11-07T17:17:19,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:20,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-07T17:17:20,008 INFO [Thread-2062 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-07T17:17:20,009 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:17:20,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-07T17:17:20,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-07T17:17:20,011 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:17:20,012 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:17:20,012 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:17:20,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:20,101 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T17:17:20,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:20,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:20,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:20,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:20,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:20,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; 
before=1, new segment=null 2024-11-07T17:17:20,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/678160f0848740d49b510f5b15475824 is 50, key is test_row_0/A:col10/1730999839454/Put/seqid=0 2024-11-07T17:17:20,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-07T17:17:20,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742312_1488 (size=14541) 2024-11-07T17:17:20,121 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/678160f0848740d49b510f5b15475824 2024-11-07T17:17:20,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/1810f0b142f2429cb0b0ea1959773e38 is 50, key is test_row_0/B:col10/1730999839454/Put/seqid=0 2024-11-07T17:17:20,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999900119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999900120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742313_1489 (size=12151) 2024-11-07T17:17:20,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999900126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999900130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,164 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-07T17:17:20,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
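While the memstore stays over the limit, each incoming Mutate RPC is answered with the same exception and the writer is expected to back off and retry. The loop below is a purely illustrative sketch of such handling against the public client API; the stock HBase client already performs equivalent retries internally, so an application would not normally write this by hand. The table, row, family and qualifier names are taken from the log; the connection setup and retry bounds are assumptions.

import java.io.IOException;

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetry {
  public static void main(String[] args) throws IOException, InterruptedException {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family and qualifier mirror the flush output above (test_row_0/A:col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // accepted once flushes drain the memstore below the limit
        } catch (RegionTooBusyException e) {
          if (attempt >= 10) {
            throw e; // give up after a bounded number of attempts
          }
          Thread.sleep(backoffMs); // let MemStoreFlusher / compactions catch up
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}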
2024-11-07T17:17:20,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:20,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:20,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:20,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:20,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:20,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999900231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999900231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999900236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999900236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-07T17:17:20,316 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-07T17:17:20,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:20,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:20,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:20,317 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
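Procedure pid=137 keeps failing with "Unable to complete flush ... as already flushing" because a memstore flush is still in progress on the region, and the master simply re-dispatches the FlushRegionCallable until it succeeds. From the client's point of view this whole exchange sits behind a single admin call, roughly as sketched below; the connection setup is an assumption, while the flush request itself mirrors the "flush TestAcidGuarantees" call logged by the master.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Corresponds to the "flush TestAcidGuarantees" request in the master log,
      // which the master turns into FlushTableProcedure / FlushRegionProcedure work.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}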
2024-11-07T17:17:20,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:20,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:20,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999900436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999900437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999900444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999900444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,469 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,470 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-07T17:17:20,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:20,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:20,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:20,470 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:20,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:20,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:20,535 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/1810f0b142f2429cb0b0ea1959773e38 2024-11-07T17:17:20,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/cbe8226c3bb34326adc7dd40b52fcf67 is 50, key is test_row_0/C:col10/1730999839454/Put/seqid=0 2024-11-07T17:17:20,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742314_1490 (size=12151) 2024-11-07T17:17:20,554 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/cbe8226c3bb34326adc7dd40b52fcf67 2024-11-07T17:17:20,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/678160f0848740d49b510f5b15475824 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/678160f0848740d49b510f5b15475824 2024-11-07T17:17:20,568 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/678160f0848740d49b510f5b15475824, entries=200, sequenceid=155, filesize=14.2 K 2024-11-07T17:17:20,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/1810f0b142f2429cb0b0ea1959773e38 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1810f0b142f2429cb0b0ea1959773e38 2024-11-07T17:17:20,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1810f0b142f2429cb0b0ea1959773e38, entries=150, sequenceid=155, filesize=11.9 K 
2024-11-07T17:17:20,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/cbe8226c3bb34326adc7dd40b52fcf67 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/cbe8226c3bb34326adc7dd40b52fcf67 2024-11-07T17:17:20,577 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/cbe8226c3bb34326adc7dd40b52fcf67, entries=150, sequenceid=155, filesize=11.9 K 2024-11-07T17:17:20,578 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 1711199603e16c41a1a94c45a03f0bd8 in 477ms, sequenceid=155, compaction requested=true 2024-11-07T17:17:20,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:20,578 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:20,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:20,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:20,579 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:20,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:17:20,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:20,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:20,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:20,580 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43563 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:20,580 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/A is initiating minor compaction (all files) 2024-11-07T17:17:20,580 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/A in 
TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:20,580 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/8ffc9ec9e1e54634a92cce5162000a09, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/882294ca131045c1af89b82f26859aa2, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/678160f0848740d49b510f5b15475824] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=42.5 K 2024-11-07T17:17:20,580 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:20,581 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/B is initiating minor compaction (all files) 2024-11-07T17:17:20,581 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/B in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:20,581 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/67afb6b9fd894dcf8c668f7ec7ff8567, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/4f8e625ef25743cb898c76df4bfdb018, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1810f0b142f2429cb0b0ea1959773e38] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=35.6 K 2024-11-07T17:17:20,582 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 67afb6b9fd894dcf8c668f7ec7ff8567, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1730999838152 2024-11-07T17:17:20,582 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ffc9ec9e1e54634a92cce5162000a09, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1730999838152 2024-11-07T17:17:20,582 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f8e625ef25743cb898c76df4bfdb018, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1730999839293 2024-11-07T17:17:20,582 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 882294ca131045c1af89b82f26859aa2, keycount=250, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1730999839293 2024-11-07T17:17:20,583 
DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 1810f0b142f2429cb0b0ea1959773e38, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1730999839443 2024-11-07T17:17:20,583 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 678160f0848740d49b510f5b15475824, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1730999839443 2024-11-07T17:17:20,592 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#B#compaction#408 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:20,592 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#A#compaction#409 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:20,592 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/60065c8946fa4cdfa3d9b05fa6d60fb1 is 50, key is test_row_0/B:col10/1730999839454/Put/seqid=0 2024-11-07T17:17:20,592 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/c83d9c4c1ed841c2984bfca621774890 is 50, key is test_row_0/A:col10/1730999839454/Put/seqid=0 2024-11-07T17:17:20,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:20,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T17:17:20,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:20,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:20,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:20,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:20,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:20,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:20,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-07T17:17:20,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742315_1491 (size=12493) 2024-11-07T17:17:20,622 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-07T17:17:20,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:20,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:20,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:20,623 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:20,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:20,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:20,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742316_1492 (size=12493) 2024-11-07T17:17:20,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/c88e8494d447483fbf51fa2a36019df7 is 50, key is test_row_0/A:col10/1730999840125/Put/seqid=0 2024-11-07T17:17:20,631 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/c83d9c4c1ed841c2984bfca621774890 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c83d9c4c1ed841c2984bfca621774890 2024-11-07T17:17:20,635 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/A of 1711199603e16c41a1a94c45a03f0bd8 into c83d9c4c1ed841c2984bfca621774890(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:20,636 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:20,636 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/A, priority=13, startTime=1730999840578; duration=0sec 2024-11-07T17:17:20,636 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:20,636 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:A 2024-11-07T17:17:20,636 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:20,637 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:20,637 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/C is initiating minor compaction (all files) 2024-11-07T17:17:20,637 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/C in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:20,637 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/0fb5a51e85bc443db71ec785d848535e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/2b6d8a25006f44cbaf64460cbe4a0fb5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/cbe8226c3bb34326adc7dd40b52fcf67] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=35.6 K 2024-11-07T17:17:20,637 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fb5a51e85bc443db71ec785d848535e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1730999838152 2024-11-07T17:17:20,638 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b6d8a25006f44cbaf64460cbe4a0fb5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1730999839293 2024-11-07T17:17:20,639 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbe8226c3bb34326adc7dd40b52fcf67, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1730999839443 2024-11-07T17:17:20,654 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#C#compaction#411 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:20,654 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/2d0508665a7b4b64857aa53606d3f8eb is 50, key is test_row_0/C:col10/1730999839454/Put/seqid=0 2024-11-07T17:17:20,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742317_1493 (size=14541) 2024-11-07T17:17:20,657 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/c88e8494d447483fbf51fa2a36019df7 2024-11-07T17:17:20,666 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/8e2abb0c419647c4aa2bba3b621d5830 is 50, key is test_row_0/B:col10/1730999840125/Put/seqid=0 2024-11-07T17:17:20,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742318_1494 (size=12493) 2024-11-07T17:17:20,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742319_1495 (size=12151) 2024-11-07T17:17:20,671 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/8e2abb0c419647c4aa2bba3b621d5830 2024-11-07T17:17:20,680 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/fa7326d0388f4f3eae0c665b30af65f5 is 50, key is test_row_0/C:col10/1730999840125/Put/seqid=0 2024-11-07T17:17:20,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742320_1496 (size=12151) 2024-11-07T17:17:20,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999900740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999900740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999900743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999900747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999900748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,780 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-07T17:17:20,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:20,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:20,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:20,781 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:20,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:20,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:20,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:20,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999900852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,934 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:20,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-07T17:17:20,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:20,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:20,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:20,935 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:20,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:20,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:21,026 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/60065c8946fa4cdfa3d9b05fa6d60fb1 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/60065c8946fa4cdfa3d9b05fa6d60fb1 2024-11-07T17:17:21,029 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/B of 1711199603e16c41a1a94c45a03f0bd8 into 60065c8946fa4cdfa3d9b05fa6d60fb1(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:21,030 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:21,030 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/B, priority=13, startTime=1730999840579; duration=0sec 2024-11-07T17:17:21,030 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:21,030 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:B 2024-11-07T17:17:21,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999901059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,076 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/2d0508665a7b4b64857aa53606d3f8eb as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/2d0508665a7b4b64857aa53606d3f8eb 2024-11-07T17:17:21,081 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/C of 1711199603e16c41a1a94c45a03f0bd8 into 2d0508665a7b4b64857aa53606d3f8eb(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:21,081 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:21,081 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/C, priority=13, startTime=1730999840579; duration=0sec 2024-11-07T17:17:21,081 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:21,081 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:C 2024-11-07T17:17:21,086 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-07T17:17:21,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:21,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:21,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:21,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:21,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:21,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:21,095 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/fa7326d0388f4f3eae0c665b30af65f5 2024-11-07T17:17:21,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/c88e8494d447483fbf51fa2a36019df7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c88e8494d447483fbf51fa2a36019df7 2024-11-07T17:17:21,103 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c88e8494d447483fbf51fa2a36019df7, entries=200, sequenceid=169, filesize=14.2 K 2024-11-07T17:17:21,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/8e2abb0c419647c4aa2bba3b621d5830 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/8e2abb0c419647c4aa2bba3b621d5830 2024-11-07T17:17:21,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/8e2abb0c419647c4aa2bba3b621d5830, entries=150, sequenceid=169, filesize=11.9 K 2024-11-07T17:17:21,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/fa7326d0388f4f3eae0c665b30af65f5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/fa7326d0388f4f3eae0c665b30af65f5 2024-11-07T17:17:21,112 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/fa7326d0388f4f3eae0c665b30af65f5, entries=150, sequenceid=169, filesize=11.9 K 2024-11-07T17:17:21,113 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 1711199603e16c41a1a94c45a03f0bd8 in 516ms, sequenceid=169, compaction requested=false 2024-11-07T17:17:21,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:21,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-07T17:17:21,239 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,239 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-07T17:17:21,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:21,240 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-07T17:17:21,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:21,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:21,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:21,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:21,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:21,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:21,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/02cc3a79a3a24947af444b62047901f5 is 50, key is test_row_0/A:col10/1730999840730/Put/seqid=0 2024-11-07T17:17:21,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742321_1497 (size=12151) 2024-11-07T17:17:21,249 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/02cc3a79a3a24947af444b62047901f5 2024-11-07T17:17:21,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:21,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
as already flushing 2024-11-07T17:17:21,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/1d13508373bf415ab00b59945d92134d is 50, key is test_row_0/B:col10/1730999840730/Put/seqid=0 2024-11-07T17:17:21,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742322_1498 (size=12151) 2024-11-07T17:17:21,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999901273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999901275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999901279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999901282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999901364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999901383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999901386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999901386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999901391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999901590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999901595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999901597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999901599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,667 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/1d13508373bf415ab00b59945d92134d 2024-11-07T17:17:21,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/e556da3538834772b2a46bc423f58b5c is 50, key is test_row_0/C:col10/1730999840730/Put/seqid=0 2024-11-07T17:17:21,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742323_1499 (size=12151) 2024-11-07T17:17:21,728 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/e556da3538834772b2a46bc423f58b5c 2024-11-07T17:17:21,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/02cc3a79a3a24947af444b62047901f5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/02cc3a79a3a24947af444b62047901f5 2024-11-07T17:17:21,740 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/02cc3a79a3a24947af444b62047901f5, entries=150, sequenceid=194, filesize=11.9 K 2024-11-07T17:17:21,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/1d13508373bf415ab00b59945d92134d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1d13508373bf415ab00b59945d92134d 2024-11-07T17:17:21,744 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1d13508373bf415ab00b59945d92134d, entries=150, sequenceid=194, filesize=11.9 K 2024-11-07T17:17:21,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/e556da3538834772b2a46bc423f58b5c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/e556da3538834772b2a46bc423f58b5c 2024-11-07T17:17:21,749 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/e556da3538834772b2a46bc423f58b5c, entries=150, sequenceid=194, filesize=11.9 K 2024-11-07T17:17:21,750 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 1711199603e16c41a1a94c45a03f0bd8 in 511ms, sequenceid=194, compaction requested=true 2024-11-07T17:17:21,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:21,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
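Procedures pid=136/137 (FlushTableProcedure and its FlushRegionProcedure subprocedure) finish in the entries just below, and the later "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" / "Operation: FLUSH ... procId: 136 completed" records show the flush was requested by a client through HBaseAdmin. A minimal sketch of issuing such a flush through the public client API, with the connection setup assumed rather than taken from the test, is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous flush request; in this log it appears on the master as a
      // FlushTableProcedure with one FlushRegionProcedure child per region,
      // as with procIds 136/137 here.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

The "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed" entry further down is the client-side acknowledgement of that same procedure.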
2024-11-07T17:17:21,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-07T17:17:21,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-07T17:17:21,753 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-07T17:17:21,753 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7390 sec 2024-11-07T17:17:21,756 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.7450 sec 2024-11-07T17:17:21,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:21,874 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-07T17:17:21,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:21,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:21,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:21,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:21,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:21,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:21,878 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/af82db6449504abbbb01fa961ad57ca7 is 50, key is test_row_0/A:col10/1730999841280/Put/seqid=0 2024-11-07T17:17:21,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742324_1500 (size=14541) 2024-11-07T17:17:21,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999901927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999901928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999901930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999901935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:21,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:21,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999901936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999902038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999902038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999902041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999902043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999902044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-07T17:17:22,115 INFO [Thread-2062 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-07T17:17:22,118 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:17:22,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-07T17:17:22,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-07T17:17:22,119 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:17:22,120 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:17:22,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:17:22,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=138 2024-11-07T17:17:22,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999902249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999902249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999902249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999902250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999902249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,271 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,271 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-07T17:17:22,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:22,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:22,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:22,272 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:22,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:22,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:22,295 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/af82db6449504abbbb01fa961ad57ca7 2024-11-07T17:17:22,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/34a9d3ace33c48aaa6ab7199072448e1 is 50, key is test_row_0/B:col10/1730999841280/Put/seqid=0 2024-11-07T17:17:22,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742325_1501 (size=12151) 2024-11-07T17:17:22,320 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/34a9d3ace33c48aaa6ab7199072448e1 2024-11-07T17:17:22,339 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/517de94153fb4e73bcf184e452c0b679 is 50, key is test_row_0/C:col10/1730999841280/Put/seqid=0 2024-11-07T17:17:22,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742326_1502 (size=12151) 2024-11-07T17:17:22,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/517de94153fb4e73bcf184e452c0b679 2024-11-07T17:17:22,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/af82db6449504abbbb01fa961ad57ca7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/af82db6449504abbbb01fa961ad57ca7 2024-11-07T17:17:22,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/af82db6449504abbbb01fa961ad57ca7, entries=200, sequenceid=209, filesize=14.2 K 2024-11-07T17:17:22,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/34a9d3ace33c48aaa6ab7199072448e1 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/34a9d3ace33c48aaa6ab7199072448e1 2024-11-07T17:17:22,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/34a9d3ace33c48aaa6ab7199072448e1, entries=150, sequenceid=209, filesize=11.9 K 2024-11-07T17:17:22,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/517de94153fb4e73bcf184e452c0b679 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/517de94153fb4e73bcf184e452c0b679 2024-11-07T17:17:22,375 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/517de94153fb4e73bcf184e452c0b679, entries=150, sequenceid=209, filesize=11.9 K 2024-11-07T17:17:22,376 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 1711199603e16c41a1a94c45a03f0bd8 in 502ms, sequenceid=209, compaction requested=true 2024-11-07T17:17:22,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:22,376 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:17:22,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:22,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:22,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:17:22,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:22,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:22,377 DEBUG 
[RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:17:22,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:22,379 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53726 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:17:22,379 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/A is initiating minor compaction (all files) 2024-11-07T17:17:22,379 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/A in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:22,379 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c83d9c4c1ed841c2984bfca621774890, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c88e8494d447483fbf51fa2a36019df7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/02cc3a79a3a24947af444b62047901f5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/af82db6449504abbbb01fa961ad57ca7] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=52.5 K 2024-11-07T17:17:22,379 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:17:22,379 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/B is initiating minor compaction (all files) 2024-11-07T17:17:22,379 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/B in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:22,380 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting c83d9c4c1ed841c2984bfca621774890, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1730999839443 2024-11-07T17:17:22,380 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/60065c8946fa4cdfa3d9b05fa6d60fb1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/8e2abb0c419647c4aa2bba3b621d5830, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1d13508373bf415ab00b59945d92134d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/34a9d3ace33c48aaa6ab7199072448e1] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=47.8 K 2024-11-07T17:17:22,380 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting c88e8494d447483fbf51fa2a36019df7, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1730999840116 2024-11-07T17:17:22,380 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02cc3a79a3a24947af444b62047901f5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1730999840730 2024-11-07T17:17:22,380 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 60065c8946fa4cdfa3d9b05fa6d60fb1, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1730999839443 2024-11-07T17:17:22,381 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting af82db6449504abbbb01fa961ad57ca7, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1730999841266 2024-11-07T17:17:22,381 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e2abb0c419647c4aa2bba3b621d5830, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1730999840116 2024-11-07T17:17:22,381 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d13508373bf415ab00b59945d92134d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1730999840730 2024-11-07T17:17:22,382 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 34a9d3ace33c48aaa6ab7199072448e1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1730999841266 2024-11-07T17:17:22,392 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#A#compaction#420 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:22,392 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/d2e1bb4e50d3479eafae3e8a7cfb33e9 is 50, key is test_row_0/A:col10/1730999841280/Put/seqid=0 2024-11-07T17:17:22,398 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#B#compaction#421 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:22,399 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/88872c1a058f40e39e000acc8b604d19 is 50, key is test_row_0/B:col10/1730999841280/Put/seqid=0 2024-11-07T17:17:22,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742327_1503 (size=12629) 2024-11-07T17:17:22,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742328_1504 (size=12629) 2024-11-07T17:17:22,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-07T17:17:22,424 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-07T17:17:22,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:22,425 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-07T17:17:22,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:22,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:22,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:22,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:22,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:22,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:22,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/4da63d1441ac475a829eb83f61e8207e is 50, key is test_row_0/A:col10/1730999841933/Put/seqid=0 2024-11-07T17:17:22,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742329_1505 (size=12151) 2024-11-07T17:17:22,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:22,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:22,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999902572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999902573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999902578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999902584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999902584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999902685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999902685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999902685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999902697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999902697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-07T17:17:22,812 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/d2e1bb4e50d3479eafae3e8a7cfb33e9 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/d2e1bb4e50d3479eafae3e8a7cfb33e9 2024-11-07T17:17:22,816 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/A of 1711199603e16c41a1a94c45a03f0bd8 into d2e1bb4e50d3479eafae3e8a7cfb33e9(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:22,816 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:22,817 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/A, priority=12, startTime=1730999842376; duration=0sec 2024-11-07T17:17:22,817 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:22,817 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:A 2024-11-07T17:17:22,817 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:17:22,818 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:17:22,818 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/C is initiating minor compaction (all files) 2024-11-07T17:17:22,818 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/C in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:22,818 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/2d0508665a7b4b64857aa53606d3f8eb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/fa7326d0388f4f3eae0c665b30af65f5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/e556da3538834772b2a46bc423f58b5c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/517de94153fb4e73bcf184e452c0b679] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=47.8 K 2024-11-07T17:17:22,819 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d0508665a7b4b64857aa53606d3f8eb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1730999839443 2024-11-07T17:17:22,819 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa7326d0388f4f3eae0c665b30af65f5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1730999840116 2024-11-07T17:17:22,819 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting e556da3538834772b2a46bc423f58b5c, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1730999840730 2024-11-07T17:17:22,820 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 517de94153fb4e73bcf184e452c0b679, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1730999841266 2024-11-07T17:17:22,822 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/88872c1a058f40e39e000acc8b604d19 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/88872c1a058f40e39e000acc8b604d19 2024-11-07T17:17:22,826 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/B of 1711199603e16c41a1a94c45a03f0bd8 into 88872c1a058f40e39e000acc8b604d19(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:22,826 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:22,826 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/B, priority=12, startTime=1730999842377; duration=0sec 2024-11-07T17:17:22,826 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:22,826 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:B 2024-11-07T17:17:22,834 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/4da63d1441ac475a829eb83f61e8207e 2024-11-07T17:17:22,835 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#C#compaction#423 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:22,835 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/9c432c1ba3f2424c928c7407e7bc6e26 is 50, key is test_row_0/C:col10/1730999841280/Put/seqid=0 2024-11-07T17:17:22,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/c1cd030f1eef4d1c85cdc5e0626355f6 is 50, key is test_row_0/B:col10/1730999841933/Put/seqid=0 2024-11-07T17:17:22,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742330_1506 (size=12629) 2024-11-07T17:17:22,852 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/9c432c1ba3f2424c928c7407e7bc6e26 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/9c432c1ba3f2424c928c7407e7bc6e26 2024-11-07T17:17:22,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742331_1507 (size=12151) 2024-11-07T17:17:22,857 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/C of 1711199603e16c41a1a94c45a03f0bd8 into 9c432c1ba3f2424c928c7407e7bc6e26(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:22,857 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:22,857 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/C, priority=12, startTime=1730999842377; duration=0sec 2024-11-07T17:17:22,858 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:22,858 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:C 2024-11-07T17:17:22,858 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/c1cd030f1eef4d1c85cdc5e0626355f6 2024-11-07T17:17:22,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/a56a8ed566b6498ca8f707340ba33dcb is 50, key is test_row_0/C:col10/1730999841933/Put/seqid=0 2024-11-07T17:17:22,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742332_1508 (size=12151) 2024-11-07T17:17:22,870 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/a56a8ed566b6498ca8f707340ba33dcb 2024-11-07T17:17:22,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/4da63d1441ac475a829eb83f61e8207e as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/4da63d1441ac475a829eb83f61e8207e 2024-11-07T17:17:22,877 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/4da63d1441ac475a829eb83f61e8207e, entries=150, sequenceid=230, filesize=11.9 K 2024-11-07T17:17:22,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/c1cd030f1eef4d1c85cdc5e0626355f6 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/c1cd030f1eef4d1c85cdc5e0626355f6 2024-11-07T17:17:22,881 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/c1cd030f1eef4d1c85cdc5e0626355f6, entries=150, sequenceid=230, filesize=11.9 K 2024-11-07T17:17:22,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/a56a8ed566b6498ca8f707340ba33dcb as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/a56a8ed566b6498ca8f707340ba33dcb 2024-11-07T17:17:22,885 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/a56a8ed566b6498ca8f707340ba33dcb, entries=150, sequenceid=230, filesize=11.9 K 2024-11-07T17:17:22,886 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 1711199603e16c41a1a94c45a03f0bd8 in 461ms, sequenceid=230, compaction requested=false 2024-11-07T17:17:22,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:22,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:22,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-07T17:17:22,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-07T17:17:22,890 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-07T17:17:22,890 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 768 msec 2024-11-07T17:17:22,891 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 772 msec 2024-11-07T17:17:22,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:22,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-07T17:17:22,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:22,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:22,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:22,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:22,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:22,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:22,900 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/420083e621094671bcdc85451e218d03 is 50, key is test_row_0/A:col10/1730999842577/Put/seqid=0 2024-11-07T17:17:22,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742333_1509 (size=14541) 2024-11-07T17:17:22,937 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/420083e621094671bcdc85451e218d03 2024-11-07T17:17:22,949 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/32999c953e514f0aa0519347724a3172 is 50, key is test_row_0/B:col10/1730999842577/Put/seqid=0 2024-11-07T17:17:22,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742334_1510 
(size=12151) 2024-11-07T17:17:22,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/32999c953e514f0aa0519347724a3172 2024-11-07T17:17:22,964 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/8f0a8d01eb2e4145bd134c6471cf0fcd is 50, key is test_row_0/C:col10/1730999842577/Put/seqid=0 2024-11-07T17:17:22,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742335_1511 (size=12151) 2024-11-07T17:17:22,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999902926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999902927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999902979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:22,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999902980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:22,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999902980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999903081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999903081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999903091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999903091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999903091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-07T17:17:23,222 INFO [Thread-2062 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-07T17:17:23,224 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:17:23,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-07T17:17:23,226 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:17:23,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-07T17:17:23,229 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:17:23,229 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:17:23,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999903284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999903284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999903295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999903295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999903296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-07T17:17:23,372 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/8f0a8d01eb2e4145bd134c6471cf0fcd 2024-11-07T17:17:23,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/420083e621094671bcdc85451e218d03 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/420083e621094671bcdc85451e218d03 2024-11-07T17:17:23,379 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/420083e621094671bcdc85451e218d03, entries=200, sequenceid=250, filesize=14.2 K 2024-11-07T17:17:23,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/32999c953e514f0aa0519347724a3172 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/32999c953e514f0aa0519347724a3172 2024-11-07T17:17:23,380 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-07T17:17:23,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:23,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:23,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:23,381 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:23,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:23,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:23,385 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/32999c953e514f0aa0519347724a3172, entries=150, sequenceid=250, filesize=11.9 K 2024-11-07T17:17:23,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/8f0a8d01eb2e4145bd134c6471cf0fcd as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/8f0a8d01eb2e4145bd134c6471cf0fcd 2024-11-07T17:17:23,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/8f0a8d01eb2e4145bd134c6471cf0fcd, entries=150, sequenceid=250, filesize=11.9 K 2024-11-07T17:17:23,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 1711199603e16c41a1a94c45a03f0bd8 in 496ms, sequenceid=250, compaction requested=true 2024-11-07T17:17:23,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:23,391 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:23,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:23,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:23,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:17:23,391 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:23,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:23,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:23,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:23,392 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39321 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:23,392 DEBUG 
[RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/A is initiating minor compaction (all files) 2024-11-07T17:17:23,392 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/A in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:23,392 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/d2e1bb4e50d3479eafae3e8a7cfb33e9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/4da63d1441ac475a829eb83f61e8207e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/420083e621094671bcdc85451e218d03] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=38.4 K 2024-11-07T17:17:23,392 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:23,392 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/B is initiating minor compaction (all files) 2024-11-07T17:17:23,392 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/B in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:23,392 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2e1bb4e50d3479eafae3e8a7cfb33e9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1730999841266 2024-11-07T17:17:23,392 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/88872c1a058f40e39e000acc8b604d19, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/c1cd030f1eef4d1c85cdc5e0626355f6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/32999c953e514f0aa0519347724a3172] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=36.1 K 2024-11-07T17:17:23,393 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4da63d1441ac475a829eb83f61e8207e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1730999841928 2024-11-07T17:17:23,393 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 88872c1a058f40e39e000acc8b604d19, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1730999841266 2024-11-07T17:17:23,393 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 420083e621094671bcdc85451e218d03, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1730999842571 2024-11-07T17:17:23,393 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting c1cd030f1eef4d1c85cdc5e0626355f6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1730999841928 2024-11-07T17:17:23,393 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 32999c953e514f0aa0519347724a3172, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1730999842571 2024-11-07T17:17:23,400 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#B#compaction#430 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:23,400 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/f16848d12ee94178b63fe65fe308aa94 is 50, key is test_row_0/B:col10/1730999842577/Put/seqid=0 2024-11-07T17:17:23,400 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#A#compaction#429 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:23,401 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/9fd9560861394bada3a51f4d9c44c9da is 50, key is test_row_0/A:col10/1730999842577/Put/seqid=0 2024-11-07T17:17:23,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742336_1512 (size=12731) 2024-11-07T17:17:23,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742337_1513 (size=12731) 2024-11-07T17:17:23,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-07T17:17:23,533 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-07T17:17:23,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:23,534 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-07T17:17:23,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:23,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:23,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:23,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:23,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:23,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:23,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/b09e0b2803ec44bdad85100a5cb1e9f6 is 50, key is 
test_row_0/A:col10/1730999842926/Put/seqid=0 2024-11-07T17:17:23,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742338_1514 (size=12301) 2024-11-07T17:17:23,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:23,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:23,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999903612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999903613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999903614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999903618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999903619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999903722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999903722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999903722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999903727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999903728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,823 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/9fd9560861394bada3a51f4d9c44c9da as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/9fd9560861394bada3a51f4d9c44c9da 2024-11-07T17:17:23,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-07T17:17:23,829 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/A of 1711199603e16c41a1a94c45a03f0bd8 into 9fd9560861394bada3a51f4d9c44c9da(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:23,829 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:23,829 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/A, priority=13, startTime=1730999843391; duration=0sec 2024-11-07T17:17:23,829 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:23,829 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:A 2024-11-07T17:17:23,829 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:23,830 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:23,830 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/C is initiating minor compaction (all files) 2024-11-07T17:17:23,830 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/C in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:23,830 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/9c432c1ba3f2424c928c7407e7bc6e26, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/a56a8ed566b6498ca8f707340ba33dcb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/8f0a8d01eb2e4145bd134c6471cf0fcd] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=36.1 K 2024-11-07T17:17:23,830 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c432c1ba3f2424c928c7407e7bc6e26, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1730999841266 2024-11-07T17:17:23,831 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting a56a8ed566b6498ca8f707340ba33dcb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1730999841928 2024-11-07T17:17:23,831 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f0a8d01eb2e4145bd134c6471cf0fcd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1730999842571 2024-11-07T17:17:23,840 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/f16848d12ee94178b63fe65fe308aa94 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/f16848d12ee94178b63fe65fe308aa94 2024-11-07T17:17:23,844 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#C#compaction#432 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:23,844 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/bc3bacfcc51640fc8e66ea7ab9b0a0d0 is 50, key is test_row_0/C:col10/1730999842577/Put/seqid=0 2024-11-07T17:17:23,847 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/B of 1711199603e16c41a1a94c45a03f0bd8 into f16848d12ee94178b63fe65fe308aa94(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:23,847 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:23,847 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/B, priority=13, startTime=1730999843391; duration=0sec 2024-11-07T17:17:23,847 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:23,847 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:B 2024-11-07T17:17:23,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742339_1515 (size=12731) 2024-11-07T17:17:23,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999903928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999903929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999903929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999903929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:23,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999903936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:23,950 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/b09e0b2803ec44bdad85100a5cb1e9f6 2024-11-07T17:17:23,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/5eb5fb214b814727bc330325486cb8b1 is 50, key is test_row_0/B:col10/1730999842926/Put/seqid=0 2024-11-07T17:17:23,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742340_1516 (size=12301) 2024-11-07T17:17:23,984 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/5eb5fb214b814727bc330325486cb8b1 2024-11-07T17:17:23,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/32bfbbb7576647a58ec5d405f3e5bf4b is 50, key is 
test_row_0/C:col10/1730999842926/Put/seqid=0 2024-11-07T17:17:23,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742341_1517 (size=12301) 2024-11-07T17:17:24,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:24,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999904236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:24,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:24,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999904236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:24,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:24,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999904236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:24,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:24,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999904237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:24,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:24,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999904241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:24,270 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/bc3bacfcc51640fc8e66ea7ab9b0a0d0 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/bc3bacfcc51640fc8e66ea7ab9b0a0d0 2024-11-07T17:17:24,274 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/C of 1711199603e16c41a1a94c45a03f0bd8 into bc3bacfcc51640fc8e66ea7ab9b0a0d0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:24,274 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:24,274 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/C, priority=13, startTime=1730999843391; duration=0sec 2024-11-07T17:17:24,275 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:24,275 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:C 2024-11-07T17:17:24,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-07T17:17:24,398 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/32bfbbb7576647a58ec5d405f3e5bf4b 2024-11-07T17:17:24,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/b09e0b2803ec44bdad85100a5cb1e9f6 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/b09e0b2803ec44bdad85100a5cb1e9f6 2024-11-07T17:17:24,411 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/b09e0b2803ec44bdad85100a5cb1e9f6, entries=150, sequenceid=270, filesize=12.0 K 2024-11-07T17:17:24,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/5eb5fb214b814727bc330325486cb8b1 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/5eb5fb214b814727bc330325486cb8b1 2024-11-07T17:17:24,415 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/5eb5fb214b814727bc330325486cb8b1, entries=150, sequenceid=270, filesize=12.0 K 2024-11-07T17:17:24,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/32bfbbb7576647a58ec5d405f3e5bf4b as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/32bfbbb7576647a58ec5d405f3e5bf4b 2024-11-07T17:17:24,421 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/32bfbbb7576647a58ec5d405f3e5bf4b, entries=150, sequenceid=270, filesize=12.0 K 2024-11-07T17:17:24,422 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 1711199603e16c41a1a94c45a03f0bd8 in 888ms, sequenceid=270, compaction requested=false 2024-11-07T17:17:24,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:24,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:24,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-07T17:17:24,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-07T17:17:24,425 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-07T17:17:24,425 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1940 sec 2024-11-07T17:17:24,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.2010 sec 2024-11-07T17:17:24,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:24,745 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-07T17:17:24,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:24,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:24,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:24,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:24,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:24,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:24,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/81d9f3f8c1e047f7b96f3331ca1d0fdd is 50, key is test_row_0/A:col10/1730999843608/Put/seqid=0 2024-11-07T17:17:24,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742342_1518 (size=14741) 2024-11-07T17:17:24,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:24,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999904768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:24,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:24,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999904769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:24,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:24,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999904769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:24,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:24,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999904770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:24,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:24,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999904771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:24,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:24,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999904875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:24,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:24,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999904875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:24,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:24,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999904875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:24,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:24,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999904877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:24,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:24,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999904877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999905080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999905081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999905081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999905081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999905081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,155 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/81d9f3f8c1e047f7b96f3331ca1d0fdd 2024-11-07T17:17:25,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/eb0cdcde49d64bbca96e0692e79acc44 is 50, key is test_row_0/B:col10/1730999843608/Put/seqid=0 2024-11-07T17:17:25,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742343_1519 (size=12301) 2024-11-07T17:17:25,172 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/eb0cdcde49d64bbca96e0692e79acc44 2024-11-07T17:17:25,181 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/c78e6267159a48eca453104e7cd50e22 is 50, key is test_row_0/C:col10/1730999843608/Put/seqid=0 2024-11-07T17:17:25,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742344_1520 (size=12301) 2024-11-07T17:17:25,193 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/c78e6267159a48eca453104e7cd50e22 2024-11-07T17:17:25,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/81d9f3f8c1e047f7b96f3331ca1d0fdd as 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/81d9f3f8c1e047f7b96f3331ca1d0fdd 2024-11-07T17:17:25,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/81d9f3f8c1e047f7b96f3331ca1d0fdd, entries=200, sequenceid=290, filesize=14.4 K 2024-11-07T17:17:25,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/eb0cdcde49d64bbca96e0692e79acc44 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/eb0cdcde49d64bbca96e0692e79acc44 2024-11-07T17:17:25,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/eb0cdcde49d64bbca96e0692e79acc44, entries=150, sequenceid=290, filesize=12.0 K 2024-11-07T17:17:25,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/c78e6267159a48eca453104e7cd50e22 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/c78e6267159a48eca453104e7cd50e22 2024-11-07T17:17:25,210 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/c78e6267159a48eca453104e7cd50e22, entries=150, sequenceid=290, filesize=12.0 K 2024-11-07T17:17:25,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 1711199603e16c41a1a94c45a03f0bd8 in 467ms, sequenceid=290, compaction requested=true 2024-11-07T17:17:25,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:25,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:25,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:25,211 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:25,211 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:25,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:B, priority=-2147483648, current under compaction 
store size is 2 2024-11-07T17:17:25,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:25,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:25,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:25,212 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:25,212 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39773 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:25,212 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/B is initiating minor compaction (all files) 2024-11-07T17:17:25,212 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/A is initiating minor compaction (all files) 2024-11-07T17:17:25,212 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/B in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:25,212 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/A in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:25,213 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/f16848d12ee94178b63fe65fe308aa94, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/5eb5fb214b814727bc330325486cb8b1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/eb0cdcde49d64bbca96e0692e79acc44] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=36.5 K 2024-11-07T17:17:25,213 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/9fd9560861394bada3a51f4d9c44c9da, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/b09e0b2803ec44bdad85100a5cb1e9f6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/81d9f3f8c1e047f7b96f3331ca1d0fdd] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=38.8 K 2024-11-07T17:17:25,213 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9fd9560861394bada3a51f4d9c44c9da, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1730999842571 2024-11-07T17:17:25,213 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting f16848d12ee94178b63fe65fe308aa94, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1730999842571 2024-11-07T17:17:25,213 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting b09e0b2803ec44bdad85100a5cb1e9f6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1730999842926 2024-11-07T17:17:25,213 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 5eb5fb214b814727bc330325486cb8b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1730999842926 2024-11-07T17:17:25,213 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81d9f3f8c1e047f7b96f3331ca1d0fdd, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1730999843608 2024-11-07T17:17:25,214 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting eb0cdcde49d64bbca96e0692e79acc44, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1730999843608 2024-11-07T17:17:25,221 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#A#compaction#438 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:25,222 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/e9564afe4c63460d8946c9911fa79dea is 50, key is test_row_0/A:col10/1730999843608/Put/seqid=0 2024-11-07T17:17:25,231 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#B#compaction#439 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:25,232 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/8ea41b0e122943e2b7b815c17dd1db82 is 50, key is test_row_0/B:col10/1730999843608/Put/seqid=0 2024-11-07T17:17:25,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742345_1521 (size=12983) 2024-11-07T17:17:25,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742346_1522 (size=12983) 2024-11-07T17:17:25,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-07T17:17:25,330 INFO [Thread-2062 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-07T17:17:25,331 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:17:25,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-07T17:17:25,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-07T17:17:25,332 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:17:25,333 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:17:25,333 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:17:25,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:25,388 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=120.76 KB 
heapSize=317.16 KB 2024-11-07T17:17:25,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:25,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:25,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:25,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:25,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:25,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:25,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/e2b33c250e194ece9ee47b461ede22fa is 50, key is test_row_0/A:col10/1730999844770/Put/seqid=0 2024-11-07T17:17:25,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742347_1523 (size=14741) 2024-11-07T17:17:25,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999905402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999905402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999905403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999905403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999905406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-07T17:17:25,484 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,485 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-07T17:17:25,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:25,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:25,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:25,485 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:25,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:25,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:25,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999905510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999905510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999905510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999905510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999905510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-07T17:17:25,638 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-07T17:17:25,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:25,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:25,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:25,638 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:25,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:25,639 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/e9564afe4c63460d8946c9911fa79dea as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e9564afe4c63460d8946c9911fa79dea 2024-11-07T17:17:25,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:25,644 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/8ea41b0e122943e2b7b815c17dd1db82 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/8ea41b0e122943e2b7b815c17dd1db82 2024-11-07T17:17:25,647 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/A of 1711199603e16c41a1a94c45a03f0bd8 into e9564afe4c63460d8946c9911fa79dea(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:25,647 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:25,647 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/A, priority=13, startTime=1730999845211; duration=0sec 2024-11-07T17:17:25,647 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:25,647 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:A 2024-11-07T17:17:25,647 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:25,649 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/B of 1711199603e16c41a1a94c45a03f0bd8 into 8ea41b0e122943e2b7b815c17dd1db82(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:25,649 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:25,649 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/B, priority=13, startTime=1730999845211; duration=0sec 2024-11-07T17:17:25,649 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:25,649 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:B 2024-11-07T17:17:25,651 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:25,651 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/C is initiating minor compaction (all files) 2024-11-07T17:17:25,652 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/C in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:25,652 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/bc3bacfcc51640fc8e66ea7ab9b0a0d0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/32bfbbb7576647a58ec5d405f3e5bf4b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/c78e6267159a48eca453104e7cd50e22] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=36.5 K 2024-11-07T17:17:25,652 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc3bacfcc51640fc8e66ea7ab9b0a0d0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1730999842571 2024-11-07T17:17:25,653 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32bfbbb7576647a58ec5d405f3e5bf4b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1730999842926 2024-11-07T17:17:25,653 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting c78e6267159a48eca453104e7cd50e22, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1730999843608 2024-11-07T17:17:25,659 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#C#compaction#441 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:25,660 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/88b3edaadc0e4ead9819cafed2603c3d is 50, key is test_row_0/C:col10/1730999843608/Put/seqid=0 2024-11-07T17:17:25,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742348_1524 (size=12983) 2024-11-07T17:17:25,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999905713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999905714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999905714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999905715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:25,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999905715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,790 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-07T17:17:25,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:25,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:25,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:25,791 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:25,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:25,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:25,796 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/e2b33c250e194ece9ee47b461ede22fa 2024-11-07T17:17:25,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/ea8d2436ff9d43b48841ce5686f0f46a is 50, key is test_row_0/B:col10/1730999844770/Put/seqid=0 2024-11-07T17:17:25,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742349_1525 (size=12301) 2024-11-07T17:17:25,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-07T17:17:25,943 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:25,944 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-07T17:17:25,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:25,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
as already flushing 2024-11-07T17:17:25,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:25,944 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:25,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:25,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:26,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:26,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999906016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:26,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:26,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999906019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:26,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:26,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999906019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:26,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:26,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999906020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:26,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:26,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999906021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:26,075 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/88b3edaadc0e4ead9819cafed2603c3d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/88b3edaadc0e4ead9819cafed2603c3d 2024-11-07T17:17:26,079 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/C of 1711199603e16c41a1a94c45a03f0bd8 into 88b3edaadc0e4ead9819cafed2603c3d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:26,079 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:26,079 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/C, priority=13, startTime=1730999845212; duration=0sec 2024-11-07T17:17:26,079 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:26,079 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:C 2024-11-07T17:17:26,096 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:26,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-07T17:17:26,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:26,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:26,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:26,097 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:26,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:26,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:26,207 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/ea8d2436ff9d43b48841ce5686f0f46a 2024-11-07T17:17:26,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/4e00d5bddeff420e901ec00a59cab2fa is 50, key is test_row_0/C:col10/1730999844770/Put/seqid=0 2024-11-07T17:17:26,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742350_1526 (size=12301) 2024-11-07T17:17:26,248 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:26,249 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-07T17:17:26,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:26,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:26,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:26,249 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:26,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:26,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:26,401 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:26,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-07T17:17:26,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:26,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:26,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:26,402 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:26,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:26,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:26,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-07T17:17:26,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:26,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999906523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:26,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:26,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999906527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:26,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:26,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999906527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:26,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:26,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999906528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:26,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:26,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999906529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:26,554 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:26,554 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-07T17:17:26,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:26,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:26,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:26,555 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:26,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:26,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:26,617 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/4e00d5bddeff420e901ec00a59cab2fa 2024-11-07T17:17:26,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/e2b33c250e194ece9ee47b461ede22fa as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e2b33c250e194ece9ee47b461ede22fa 2024-11-07T17:17:26,624 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e2b33c250e194ece9ee47b461ede22fa, entries=200, sequenceid=311, filesize=14.4 K 2024-11-07T17:17:26,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/ea8d2436ff9d43b48841ce5686f0f46a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/ea8d2436ff9d43b48841ce5686f0f46a 2024-11-07T17:17:26,627 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/ea8d2436ff9d43b48841ce5686f0f46a, entries=150, sequenceid=311, filesize=12.0 K 2024-11-07T17:17:26,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/4e00d5bddeff420e901ec00a59cab2fa as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/4e00d5bddeff420e901ec00a59cab2fa 2024-11-07T17:17:26,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/4e00d5bddeff420e901ec00a59cab2fa, entries=150, sequenceid=311, filesize=12.0 K 2024-11-07T17:17:26,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 1711199603e16c41a1a94c45a03f0bd8 in 1244ms, sequenceid=311, compaction requested=false 2024-11-07T17:17:26,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:26,706 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:26,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=143 2024-11-07T17:17:26,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:26,707 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-07T17:17:26,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:26,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:26,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:26,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:26,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:26,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:26,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/5d5d4d1b2fac49f39f133f4685bbeddb is 50, key is test_row_0/A:col10/1730999845398/Put/seqid=0 2024-11-07T17:17:26,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742351_1527 (size=12301) 2024-11-07T17:17:27,115 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/5d5d4d1b2fac49f39f133f4685bbeddb 2024-11-07T17:17:27,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/b865835dbc664dbf98afcc79332e9644 is 50, key is test_row_0/B:col10/1730999845398/Put/seqid=0 2024-11-07T17:17:27,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742352_1528 (size=12301) 2024-11-07T17:17:27,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): 
Checking to see if procedure is done pid=142 2024-11-07T17:17:27,524 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/b865835dbc664dbf98afcc79332e9644 2024-11-07T17:17:27,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:27,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:27,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/1e34c2b306ad4fdface66278e6cfe493 is 50, key is test_row_0/C:col10/1730999845398/Put/seqid=0 2024-11-07T17:17:27,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742353_1529 (size=12301) 2024-11-07T17:17:27,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:27,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999907551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:27,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:27,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999907552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:27,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:27,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999907553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:27,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:27,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999907557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:27,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:27,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999907560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:27,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:27,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999907661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:27,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:27,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999907661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:27,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:27,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999907661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:27,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:27,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999907662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:27,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:27,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999907666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:27,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:27,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999907864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:27,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:27,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999907865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:27,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:27,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999907865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:27,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:27,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999907867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:27,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:27,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999907869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:27,936 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/1e34c2b306ad4fdface66278e6cfe493 2024-11-07T17:17:27,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/5d5d4d1b2fac49f39f133f4685bbeddb as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/5d5d4d1b2fac49f39f133f4685bbeddb 2024-11-07T17:17:27,943 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/5d5d4d1b2fac49f39f133f4685bbeddb, entries=150, sequenceid=329, filesize=12.0 K 2024-11-07T17:17:27,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/b865835dbc664dbf98afcc79332e9644 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b865835dbc664dbf98afcc79332e9644 2024-11-07T17:17:27,946 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b865835dbc664dbf98afcc79332e9644, entries=150, sequenceid=329, filesize=12.0 K 2024-11-07T17:17:27,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/1e34c2b306ad4fdface66278e6cfe493 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/1e34c2b306ad4fdface66278e6cfe493 2024-11-07T17:17:27,950 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/1e34c2b306ad4fdface66278e6cfe493, entries=150, sequenceid=329, filesize=12.0 K 2024-11-07T17:17:27,951 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 1711199603e16c41a1a94c45a03f0bd8 in 1244ms, sequenceid=329, compaction requested=true 2024-11-07T17:17:27,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:27,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:27,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-07T17:17:27,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-07T17:17:27,953 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-07T17:17:27,953 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6190 sec 2024-11-07T17:17:27,954 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 2.6220 sec 2024-11-07T17:17:28,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:28,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-07T17:17:28,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:28,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:28,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:28,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:28,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:28,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:28,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/f520ea0e775547e0b560a515c41b68a7 is 50, key is test_row_0/A:col10/1730999847558/Put/seqid=0 2024-11-07T17:17:28,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742354_1530 (size=14741) 2024-11-07T17:17:28,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/f520ea0e775547e0b560a515c41b68a7 2024-11-07T17:17:28,188 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/1084fa1dccf24917afd970e4443886f8 is 50, key is test_row_0/B:col10/1730999847558/Put/seqid=0 2024-11-07T17:17:28,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999908181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999908187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742355_1531 (size=12301) 2024-11-07T17:17:28,192 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/1084fa1dccf24917afd970e4443886f8 2024-11-07T17:17:28,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/a7aac47356b840adac08954d2024ade7 is 50, key is test_row_0/C:col10/1730999847558/Put/seqid=0 2024-11-07T17:17:28,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999908189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742356_1532 (size=12301) 2024-11-07T17:17:28,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999908190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999908191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999908292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999908292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999908299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999908302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999908302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999908495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999908496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999908503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999908506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999908506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,601 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/a7aac47356b840adac08954d2024ade7 2024-11-07T17:17:28,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/f520ea0e775547e0b560a515c41b68a7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/f520ea0e775547e0b560a515c41b68a7 2024-11-07T17:17:28,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/f520ea0e775547e0b560a515c41b68a7, entries=200, sequenceid=351, filesize=14.4 K 2024-11-07T17:17:28,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/1084fa1dccf24917afd970e4443886f8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1084fa1dccf24917afd970e4443886f8 2024-11-07T17:17:28,611 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1084fa1dccf24917afd970e4443886f8, entries=150, sequenceid=351, filesize=12.0 K 2024-11-07T17:17:28,612 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/a7aac47356b840adac08954d2024ade7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/a7aac47356b840adac08954d2024ade7 2024-11-07T17:17:28,615 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/a7aac47356b840adac08954d2024ade7, entries=150, sequenceid=351, filesize=12.0 K 2024-11-07T17:17:28,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 1711199603e16c41a1a94c45a03f0bd8 in 443ms, sequenceid=351, compaction requested=true 2024-11-07T17:17:28,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:28,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:28,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:28,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:17:28,616 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:17:28,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:28,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:28,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:28,616 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:17:28,617 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54766 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:17:28,617 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/A is initiating minor compaction (all files) 2024-11-07T17:17:28,617 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/A in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:28,617 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:17:28,617 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e9564afe4c63460d8946c9911fa79dea, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e2b33c250e194ece9ee47b461ede22fa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/5d5d4d1b2fac49f39f133f4685bbeddb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/f520ea0e775547e0b560a515c41b68a7] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=53.5 K 2024-11-07T17:17:28,617 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/B is initiating minor compaction (all files) 2024-11-07T17:17:28,617 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/B in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:28,617 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/8ea41b0e122943e2b7b815c17dd1db82, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/ea8d2436ff9d43b48841ce5686f0f46a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b865835dbc664dbf98afcc79332e9644, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1084fa1dccf24917afd970e4443886f8] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=48.7 K 2024-11-07T17:17:28,617 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9564afe4c63460d8946c9911fa79dea, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1730999843608 2024-11-07T17:17:28,617 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2b33c250e194ece9ee47b461ede22fa, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1730999844759 2024-11-07T17:17:28,617 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ea41b0e122943e2b7b815c17dd1db82, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, 
earliestPutTs=1730999843608 2024-11-07T17:17:28,618 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting ea8d2436ff9d43b48841ce5686f0f46a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1730999844768 2024-11-07T17:17:28,618 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d5d4d1b2fac49f39f133f4685bbeddb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1730999845398 2024-11-07T17:17:28,618 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting f520ea0e775547e0b560a515c41b68a7, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1730999847551 2024-11-07T17:17:28,618 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting b865835dbc664dbf98afcc79332e9644, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1730999845398 2024-11-07T17:17:28,618 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 1084fa1dccf24917afd970e4443886f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1730999847556 2024-11-07T17:17:28,625 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#A#compaction#450 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:28,625 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/bd5253c83a344c80a0d40e12d0296619 is 50, key is test_row_0/A:col10/1730999847558/Put/seqid=0 2024-11-07T17:17:28,628 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#B#compaction#451 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:28,629 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/2b9316d3d2cc40f2ac358b6aa6179743 is 50, key is test_row_0/B:col10/1730999847558/Put/seqid=0 2024-11-07T17:17:28,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742357_1533 (size=13119) 2024-11-07T17:17:28,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742358_1534 (size=13119) 2024-11-07T17:17:28,636 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/2b9316d3d2cc40f2ac358b6aa6179743 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/2b9316d3d2cc40f2ac358b6aa6179743 2024-11-07T17:17:28,640 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/B of 1711199603e16c41a1a94c45a03f0bd8 into 2b9316d3d2cc40f2ac358b6aa6179743(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:28,640 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:28,640 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/B, priority=12, startTime=1730999848615; duration=0sec 2024-11-07T17:17:28,641 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:28,641 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:B 2024-11-07T17:17:28,641 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:17:28,641 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:17:28,642 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/C is initiating minor compaction (all files) 2024-11-07T17:17:28,642 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/C in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:28,642 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/88b3edaadc0e4ead9819cafed2603c3d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/4e00d5bddeff420e901ec00a59cab2fa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/1e34c2b306ad4fdface66278e6cfe493, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/a7aac47356b840adac08954d2024ade7] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=48.7 K 2024-11-07T17:17:28,642 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 88b3edaadc0e4ead9819cafed2603c3d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1730999843608 2024-11-07T17:17:28,642 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e00d5bddeff420e901ec00a59cab2fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1730999844768 2024-11-07T17:17:28,642 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e34c2b306ad4fdface66278e6cfe493, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1730999845398 2024-11-07T17:17:28,643 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting a7aac47356b840adac08954d2024ade7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1730999847556 2024-11-07T17:17:28,649 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#C#compaction#452 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:28,649 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/fb0a389d6a704275a372e9b5c232c7d1 is 50, key is test_row_0/C:col10/1730999847558/Put/seqid=0 2024-11-07T17:17:28,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742359_1535 (size=13119) 2024-11-07T17:17:28,657 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/fb0a389d6a704275a372e9b5c232c7d1 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/fb0a389d6a704275a372e9b5c232c7d1 2024-11-07T17:17:28,661 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/C of 1711199603e16c41a1a94c45a03f0bd8 into fb0a389d6a704275a372e9b5c232c7d1(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:28,661 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:28,661 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/C, priority=12, startTime=1730999848616; duration=0sec 2024-11-07T17:17:28,661 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:28,661 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:C 2024-11-07T17:17:28,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:28,802 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-07T17:17:28,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:28,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:28,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:28,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:28,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:28,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:28,809 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/9c8c64f14d1e46f599d1f8012ea7c0ab is 50, key is test_row_0/A:col10/1730999848801/Put/seqid=0 2024-11-07T17:17:28,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742360_1536 (size=12301) 2024-11-07T17:17:28,814 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/9c8c64f14d1e46f599d1f8012ea7c0ab 2024-11-07T17:17:28,821 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/b2d1a7f4370b458ca60ee88bb3a7db8d is 50, key is test_row_0/B:col10/1730999848801/Put/seqid=0 2024-11-07T17:17:28,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742361_1537 (size=12301) 2024-11-07T17:17:28,832 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/b2d1a7f4370b458ca60ee88bb3a7db8d 2024-11-07T17:17:28,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999908825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/c8d8d8f021aa410d9f26a22cb8b2e043 is 50, key is test_row_0/C:col10/1730999848801/Put/seqid=0 2024-11-07T17:17:28,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999908829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999908830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999908831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999908834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742362_1538 (size=12301) 2024-11-07T17:17:28,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999908935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999908939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999908939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999908939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:28,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:28,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999908942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,035 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/bd5253c83a344c80a0d40e12d0296619 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/bd5253c83a344c80a0d40e12d0296619 2024-11-07T17:17:29,039 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/A of 1711199603e16c41a1a94c45a03f0bd8 into bd5253c83a344c80a0d40e12d0296619(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:29,039 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:29,039 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/A, priority=12, startTime=1730999848615; duration=0sec 2024-11-07T17:17:29,039 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:29,039 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:A 2024-11-07T17:17:29,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999909140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999909142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999909144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,147 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999909145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999909148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/c8d8d8f021aa410d9f26a22cb8b2e043 2024-11-07T17:17:29,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/9c8c64f14d1e46f599d1f8012ea7c0ab as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/9c8c64f14d1e46f599d1f8012ea7c0ab 2024-11-07T17:17:29,252 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/9c8c64f14d1e46f599d1f8012ea7c0ab, entries=150, sequenceid=368, filesize=12.0 K 2024-11-07T17:17:29,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/b2d1a7f4370b458ca60ee88bb3a7db8d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2d1a7f4370b458ca60ee88bb3a7db8d 2024-11-07T17:17:29,255 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2d1a7f4370b458ca60ee88bb3a7db8d, entries=150, sequenceid=368, filesize=12.0 K 2024-11-07T17:17:29,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/c8d8d8f021aa410d9f26a22cb8b2e043 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/c8d8d8f021aa410d9f26a22cb8b2e043 2024-11-07T17:17:29,259 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/c8d8d8f021aa410d9f26a22cb8b2e043, entries=150, sequenceid=368, filesize=12.0 K 2024-11-07T17:17:29,260 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 1711199603e16c41a1a94c45a03f0bd8 in 458ms, sequenceid=368, compaction requested=false 2024-11-07T17:17:29,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:29,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-07T17:17:29,437 INFO [Thread-2062 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-07T17:17:29,438 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:17:29,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-11-07T17:17:29,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-07T17:17:29,439 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:17:29,440 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:17:29,440 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:17:29,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:29,448 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-07T17:17:29,449 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:29,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:29,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:29,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:29,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:29,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:29,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/866a4d80fa154e8ab8584bbe8beec7aa is 50, key is test_row_0/A:col10/1730999848828/Put/seqid=0 2024-11-07T17:17:29,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742363_1539 (size=17181) 2024-11-07T17:17:29,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999909459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999909461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999909461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999909462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999909462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-07T17:17:29,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999909565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999909569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999909569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999909569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999909570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,591 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-07T17:17:29,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:29,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:29,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:29,592 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:29,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:29,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:29,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-07T17:17:29,744 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-07T17:17:29,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:29,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:29,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:29,745 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:29,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:29,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:29,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999909768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999909772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999909773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999909773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:29,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999909773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,858 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/866a4d80fa154e8ab8584bbe8beec7aa 2024-11-07T17:17:29,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/5779f878ce684ecf964b8c9430245e35 is 50, key is test_row_0/B:col10/1730999848828/Put/seqid=0 2024-11-07T17:17:29,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742364_1540 (size=12301) 2024-11-07T17:17:29,868 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/5779f878ce684ecf964b8c9430245e35 2024-11-07T17:17:29,878 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/d3b6051b53a341cbb7451c3930d73ff5 is 50, key is test_row_0/C:col10/1730999848828/Put/seqid=0 2024-11-07T17:17:29,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742365_1541 (size=12301) 2024-11-07T17:17:29,896 DEBUG [RSProcedureDispatcher-pool-2 {}] 
master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:29,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-07T17:17:29,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:29,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:29,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:29,898 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:29,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
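Annotation: the pid=145 entries above and below are the region-server side of a master-requested flush procedure being re-dispatched while the region is still mid-flush. For context only, a minimal client-side sketch of how such a flush request is issued; this is illustrative and not the test harness's own code (the class name FlushRequestSketch is hypothetical), using the table name taken from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush procedure to the master, which then dispatches
      // FlushRegionCallable to the region servers hosting the table's regions;
      // that remote callable is the procedure seen being retried in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

While a region reports "NOT flushing ... as already flushing", each dispatch fails with the IOException above and the master simply retries until the in-progress flush completes.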
2024-11-07T17:17:29,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:30,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-07T17:17:30,050 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-07T17:17:30,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
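Annotation: the flush being retried here was triggered because the region's memstores crossed the write-blocking threshold reported in the RegionTooBusyException entries throughout this log ("Over memstore limit=512.0 K"). A minimal sketch of the configuration relationship behind that number follows; the property names and stock defaults are standard HBase settings, but the exact values this test sets are not shown in the log, so the 512 K figure is only inferred.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region write-blocking threshold = memstore flush size * block multiplier.
    // Stock defaults are 128 MB * 4; the 512.0 K limit in this log suggests the
    // test harness shrank the flush size, though its settings are not shown here.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    System.out.println("Puts block once a region's memstores exceed ~"
        + (flushSize * multiplier) + " bytes");
  }
}
```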
2024-11-07T17:17:30,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:30,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:30,051 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:30,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
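Annotation: the Mutate RPCs rejected with RegionTooBusyException in this log (callId 166-179) are what a writing client experiences while the region blocks puts during the flush. A hedged client-side sketch of such a write is shown below; the row, family, and qualifier come from the log's key test_row_0/A:col10, while the value, retry count, and pause are illustrative and the class name TooBusyRetrySketch is hypothetical.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client-side knobs controlling how long puts keep retrying transient
    // rejections (such as RegionTooBusyException) before giving up.
    conf.setInt("hbase.client.retries.number", 10);
    conf.setLong("hbase.client.pause", 200);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put);
      } catch (IOException e) {
        // Surfaces only after the configured retries are exhausted; the underlying
        // cause is typically the RegionTooBusyException recorded in this log while
        // the region's memstores are still over their blocking limit.
        System.err.println("Write failed while region was blocking: " + e.getMessage());
      }
    }
  }
}
```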
2024-11-07T17:17:30,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:30,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999910075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999910076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999910078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999910080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999910080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,203 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-07T17:17:30,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:30,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:30,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:30,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:30,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:30,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:30,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/d3b6051b53a341cbb7451c3930d73ff5 2024-11-07T17:17:30,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/866a4d80fa154e8ab8584bbe8beec7aa as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/866a4d80fa154e8ab8584bbe8beec7aa 2024-11-07T17:17:30,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/866a4d80fa154e8ab8584bbe8beec7aa, entries=250, sequenceid=392, filesize=16.8 K 2024-11-07T17:17:30,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/5779f878ce684ecf964b8c9430245e35 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/5779f878ce684ecf964b8c9430245e35 2024-11-07T17:17:30,300 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/5779f878ce684ecf964b8c9430245e35, entries=150, 
sequenceid=392, filesize=12.0 K 2024-11-07T17:17:30,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/d3b6051b53a341cbb7451c3930d73ff5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d3b6051b53a341cbb7451c3930d73ff5 2024-11-07T17:17:30,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d3b6051b53a341cbb7451c3930d73ff5, entries=150, sequenceid=392, filesize=12.0 K 2024-11-07T17:17:30,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 1711199603e16c41a1a94c45a03f0bd8 in 857ms, sequenceid=392, compaction requested=true 2024-11-07T17:17:30,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:30,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:30,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:30,305 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:30,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:17:30,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:30,305 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:30,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:30,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:30,306 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:30,306 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42601 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:30,306 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 
1711199603e16c41a1a94c45a03f0bd8/B is initiating minor compaction (all files) 2024-11-07T17:17:30,306 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/A is initiating minor compaction (all files) 2024-11-07T17:17:30,306 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/B in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:30,306 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/A in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:30,306 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/2b9316d3d2cc40f2ac358b6aa6179743, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2d1a7f4370b458ca60ee88bb3a7db8d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/5779f878ce684ecf964b8c9430245e35] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=36.8 K 2024-11-07T17:17:30,306 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/bd5253c83a344c80a0d40e12d0296619, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/9c8c64f14d1e46f599d1f8012ea7c0ab, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/866a4d80fa154e8ab8584bbe8beec7aa] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=41.6 K 2024-11-07T17:17:30,307 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b9316d3d2cc40f2ac358b6aa6179743, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1730999847556 2024-11-07T17:17:30,307 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd5253c83a344c80a0d40e12d0296619, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1730999847556 2024-11-07T17:17:30,307 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c8c64f14d1e46f599d1f8012ea7c0ab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1730999848187 2024-11-07T17:17:30,307 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting b2d1a7f4370b458ca60ee88bb3a7db8d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1730999848187 2024-11-07T17:17:30,307 DEBUG 
[RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 866a4d80fa154e8ab8584bbe8beec7aa, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1730999848823 2024-11-07T17:17:30,307 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 5779f878ce684ecf964b8c9430245e35, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1730999848828 2024-11-07T17:17:30,314 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#B#compaction#459 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:30,314 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#A#compaction#460 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:30,314 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/d9a203faed354cbc9132b21a4d17a288 is 50, key is test_row_0/B:col10/1730999848828/Put/seqid=0 2024-11-07T17:17:30,315 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/82bf24badb4d474a80495c38745406ab is 50, key is test_row_0/A:col10/1730999848828/Put/seqid=0 2024-11-07T17:17:30,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742366_1542 (size=13221) 2024-11-07T17:17:30,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742367_1543 (size=13221) 2024-11-07T17:17:30,355 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-07T17:17:30,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:30,356 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T17:17:30,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:30,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:30,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:30,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:30,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:30,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:30,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/314aec1d704b46bdbd90fe6c97ca82ab is 50, key is test_row_0/A:col10/1730999849459/Put/seqid=0 2024-11-07T17:17:30,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742368_1544 (size=12301) 2024-11-07T17:17:30,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-07T17:17:30,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:30,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:30,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999910603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999910605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999910606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999910609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999910610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999910711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999910711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999910713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999910713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999910717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,730 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/d9a203faed354cbc9132b21a4d17a288 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/d9a203faed354cbc9132b21a4d17a288 2024-11-07T17:17:30,733 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/B of 1711199603e16c41a1a94c45a03f0bd8 into d9a203faed354cbc9132b21a4d17a288(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:30,734 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:30,734 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/B, priority=13, startTime=1730999850305; duration=0sec 2024-11-07T17:17:30,734 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:30,734 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:B 2024-11-07T17:17:30,734 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:30,736 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:30,737 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/C is initiating minor compaction (all files) 2024-11-07T17:17:30,737 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/C in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:30,737 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/fb0a389d6a704275a372e9b5c232c7d1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/c8d8d8f021aa410d9f26a22cb8b2e043, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d3b6051b53a341cbb7451c3930d73ff5] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=36.8 K 2024-11-07T17:17:30,738 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting fb0a389d6a704275a372e9b5c232c7d1, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1730999847556 2024-11-07T17:17:30,738 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/82bf24badb4d474a80495c38745406ab as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/82bf24badb4d474a80495c38745406ab 2024-11-07T17:17:30,738 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting c8d8d8f021aa410d9f26a22cb8b2e043, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1730999848187 2024-11-07T17:17:30,738 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting d3b6051b53a341cbb7451c3930d73ff5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1730999848828 2024-11-07T17:17:30,741 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/A of 1711199603e16c41a1a94c45a03f0bd8 into 82bf24badb4d474a80495c38745406ab(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:30,742 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:30,742 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/A, priority=13, startTime=1730999850305; duration=0sec 2024-11-07T17:17:30,742 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:30,742 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:A 2024-11-07T17:17:30,744 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#C#compaction#462 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:30,744 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/7c2968fd8875475ea1c5972483a09b04 is 50, key is test_row_0/C:col10/1730999848828/Put/seqid=0 2024-11-07T17:17:30,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742369_1545 (size=13221) 2024-11-07T17:17:30,764 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/314aec1d704b46bdbd90fe6c97ca82ab 2024-11-07T17:17:30,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/b2333a46df17419a86188c2c2b23d546 is 50, key is test_row_0/B:col10/1730999849459/Put/seqid=0 2024-11-07T17:17:30,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742370_1546 (size=12301) 2024-11-07T17:17:30,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999910916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999910916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999910918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999910919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:30,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:30,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999910921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:31,152 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/7c2968fd8875475ea1c5972483a09b04 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/7c2968fd8875475ea1c5972483a09b04 2024-11-07T17:17:31,156 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/C of 1711199603e16c41a1a94c45a03f0bd8 into 7c2968fd8875475ea1c5972483a09b04(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
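Note: the repeated RegionTooBusyException entries above come from HRegion.checkResources once the region's memstore passes its blocking limit, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The following is an illustrative sketch only; the concrete values are assumptions chosen to reproduce the 512.0 K limit reported in this log, not settings read from the run itself.

    // Sketch (assumed values): how a 512 K blocking memstore limit can arise from configuration.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical test-style settings; the shipped defaults are 128 MB and 4.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 K flush threshold (assumed)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x the flush size

        long flushSize  = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
        int  multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier; // 524288 bytes = 512 K with the values above
        System.out.println("Blocking memstore limit: " + blockingLimit + " bytes");
      }
    }

Once a region's memstore exceeds that limit, puts are rejected with RegionTooBusyException until a flush (as seen later in this log) brings the memstore back under the threshold.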
2024-11-07T17:17:31,156 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:31,156 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/C, priority=13, startTime=1730999850305; duration=0sec 2024-11-07T17:17:31,156 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:31,156 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:C 2024-11-07T17:17:31,174 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/b2333a46df17419a86188c2c2b23d546 2024-11-07T17:17:31,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/d921778379db4eb88cda6fb325651595 is 50, key is test_row_0/C:col10/1730999849459/Put/seqid=0 2024-11-07T17:17:31,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742371_1547 (size=12301) 2024-11-07T17:17:31,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:31,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999911222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:31,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:31,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999911223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:31,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:31,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999911223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:31,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:31,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999911224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:31,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:31,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999911225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:31,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-07T17:17:31,583 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/d921778379db4eb88cda6fb325651595 2024-11-07T17:17:31,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/314aec1d704b46bdbd90fe6c97ca82ab as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/314aec1d704b46bdbd90fe6c97ca82ab 2024-11-07T17:17:31,590 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/314aec1d704b46bdbd90fe6c97ca82ab, entries=150, sequenceid=405, filesize=12.0 K 2024-11-07T17:17:31,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/b2333a46df17419a86188c2c2b23d546 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2333a46df17419a86188c2c2b23d546 2024-11-07T17:17:31,594 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2333a46df17419a86188c2c2b23d546, entries=150, sequenceid=405, filesize=12.0 K 2024-11-07T17:17:31,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/d921778379db4eb88cda6fb325651595 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d921778379db4eb88cda6fb325651595 2024-11-07T17:17:31,598 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d921778379db4eb88cda6fb325651595, entries=150, sequenceid=405, filesize=12.0 K 2024-11-07T17:17:31,599 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 1711199603e16c41a1a94c45a03f0bd8 in 1242ms, sequenceid=405, compaction requested=false 2024-11-07T17:17:31,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:31,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
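Note: the flush whose completion is recorded above (pid=145, a FlushRegionProcedure spawned by the FlushTableProcedure pid=144 finished just below) is the kind of table flush a client can request through the Admin API. A minimal, self-contained sketch follows, assuming only the table name taken from the log and a default client configuration; it is not code from this test run.

    // Sketch: requesting a flush of the TestAcidGuarantees table via the HBase Admin API.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Requests a flush of the table's memstores to store files on HDFS,
          // producing per-family files like the A/B/C entries committed above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }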
2024-11-07T17:17:31,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-11-07T17:17:31,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-11-07T17:17:31,601 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-07T17:17:31,601 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1600 sec 2024-11-07T17:17:31,602 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 2.1630 sec 2024-11-07T17:17:31,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:31,729 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T17:17:31,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:31,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:31,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:31,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:31,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:31,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:31,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/ea2eed37da1649eeac6b0be5f15644f1 is 50, key is test_row_0/A:col10/1730999851730/Put/seqid=0 2024-11-07T17:17:31,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742372_1548 (size=14741) 2024-11-07T17:17:31,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:31,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999911737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:31,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:31,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999911740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:31,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:31,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999911745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:31,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:31,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:31,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999911746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:31,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999911746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:31,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:31,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999911846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:31,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:31,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999911847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:31,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:31,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999911851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:31,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:31,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999911856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:31,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:31,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999911856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:32,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:32,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999912051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:32,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:32,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999912052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:32,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:32,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999912057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:32,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:32,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999912061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:32,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:32,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999912061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:32,139 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/ea2eed37da1649eeac6b0be5f15644f1 2024-11-07T17:17:32,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/56d73be42bfd4fbfa5a65557d994ad0a is 50, key is test_row_0/B:col10/1730999851730/Put/seqid=0 2024-11-07T17:17:32,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742373_1549 (size=12301) 2024-11-07T17:17:32,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/56d73be42bfd4fbfa5a65557d994ad0a 2024-11-07T17:17:32,156 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/7ef57814e1984e12b7d0b11c19800074 is 50, key is test_row_0/C:col10/1730999851730/Put/seqid=0 2024-11-07T17:17:32,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742374_1550 (size=12301) 2024-11-07T17:17:32,359 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:32,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999912356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:32,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:32,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999912357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:32,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:32,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999912363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:32,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:32,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999912365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:32,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:32,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999912366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:32,563 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/7ef57814e1984e12b7d0b11c19800074 2024-11-07T17:17:32,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/ea2eed37da1649eeac6b0be5f15644f1 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/ea2eed37da1649eeac6b0be5f15644f1 2024-11-07T17:17:32,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/ea2eed37da1649eeac6b0be5f15644f1, entries=200, sequenceid=433, filesize=14.4 K 2024-11-07T17:17:32,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/56d73be42bfd4fbfa5a65557d994ad0a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/56d73be42bfd4fbfa5a65557d994ad0a 2024-11-07T17:17:32,572 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/56d73be42bfd4fbfa5a65557d994ad0a, entries=150, sequenceid=433, filesize=12.0 K 2024-11-07T17:17:32,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/7ef57814e1984e12b7d0b11c19800074 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/7ef57814e1984e12b7d0b11c19800074 2024-11-07T17:17:32,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/7ef57814e1984e12b7d0b11c19800074, entries=150, sequenceid=433, filesize=12.0 K 2024-11-07T17:17:32,579 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 1711199603e16c41a1a94c45a03f0bd8 in 850ms, sequenceid=433, compaction requested=true 2024-11-07T17:17:32,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:32,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:32,579 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:32,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:32,579 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:32,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:17:32,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:32,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:32,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:32,580 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40263 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:32,580 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/A is initiating minor compaction (all files) 2024-11-07T17:17:32,580 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/A in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:32,580 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/82bf24badb4d474a80495c38745406ab, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/314aec1d704b46bdbd90fe6c97ca82ab, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/ea2eed37da1649eeac6b0be5f15644f1] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=39.3 K 2024-11-07T17:17:32,580 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:32,581 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/B is initiating minor compaction (all files) 2024-11-07T17:17:32,581 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/B in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:32,581 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/d9a203faed354cbc9132b21a4d17a288, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2333a46df17419a86188c2c2b23d546, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/56d73be42bfd4fbfa5a65557d994ad0a] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=36.9 K 2024-11-07T17:17:32,581 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82bf24badb4d474a80495c38745406ab, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1730999848828 2024-11-07T17:17:32,581 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting d9a203faed354cbc9132b21a4d17a288, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1730999848828 2024-11-07T17:17:32,581 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 314aec1d704b46bdbd90fe6c97ca82ab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1730999849459 2024-11-07T17:17:32,582 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea2eed37da1649eeac6b0be5f15644f1, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1730999850608 2024-11-07T17:17:32,582 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] 
compactions.Compactor(224): Compacting b2333a46df17419a86188c2c2b23d546, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1730999849459 2024-11-07T17:17:32,582 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 56d73be42bfd4fbfa5a65557d994ad0a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1730999850608 2024-11-07T17:17:32,590 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#A#compaction#468 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:32,590 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/5d4997ce690247fd8b5d44114b66ef70 is 50, key is test_row_0/A:col10/1730999851730/Put/seqid=0 2024-11-07T17:17:32,593 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#B#compaction#469 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:32,593 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/b2618cb46ff1478283fc25c54821c0e1 is 50, key is test_row_0/B:col10/1730999851730/Put/seqid=0 2024-11-07T17:17:32,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742375_1551 (size=13323) 2024-11-07T17:17:32,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742376_1552 (size=13323) 2024-11-07T17:17:32,610 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/b2618cb46ff1478283fc25c54821c0e1 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2618cb46ff1478283fc25c54821c0e1 2024-11-07T17:17:32,615 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/B of 1711199603e16c41a1a94c45a03f0bd8 into b2618cb46ff1478283fc25c54821c0e1(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:32,615 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:32,615 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/B, priority=13, startTime=1730999852579; duration=0sec 2024-11-07T17:17:32,616 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:32,616 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:B 2024-11-07T17:17:32,616 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:32,616 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:32,616 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/C is initiating minor compaction (all files) 2024-11-07T17:17:32,617 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/C in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:32,617 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/7c2968fd8875475ea1c5972483a09b04, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d921778379db4eb88cda6fb325651595, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/7ef57814e1984e12b7d0b11c19800074] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=36.9 K 2024-11-07T17:17:32,617 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c2968fd8875475ea1c5972483a09b04, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1730999848828 2024-11-07T17:17:32,617 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting d921778379db4eb88cda6fb325651595, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1730999849459 2024-11-07T17:17:32,623 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ef57814e1984e12b7d0b11c19800074, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1730999850608 2024-11-07T17:17:32,633 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
1711199603e16c41a1a94c45a03f0bd8#C#compaction#470 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:32,633 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/166b84f9a95a40348e08fd53f9710aef is 50, key is test_row_0/C:col10/1730999851730/Put/seqid=0 2024-11-07T17:17:32,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742377_1553 (size=13323) 2024-11-07T17:17:32,640 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/166b84f9a95a40348e08fd53f9710aef as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/166b84f9a95a40348e08fd53f9710aef 2024-11-07T17:17:32,645 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/C of 1711199603e16c41a1a94c45a03f0bd8 into 166b84f9a95a40348e08fd53f9710aef(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:32,645 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:32,645 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/C, priority=13, startTime=1730999852579; duration=0sec 2024-11-07T17:17:32,645 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:32,645 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:C 2024-11-07T17:17:32,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:32,866 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T17:17:32,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:32,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:32,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:32,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:32,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO 
DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:32,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:32,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/e494891e80bc4ea38c12fcc59b634d43 is 50, key is test_row_0/A:col10/1730999851745/Put/seqid=0 2024-11-07T17:17:32,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742378_1554 (size=17181) 2024-11-07T17:17:32,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:32,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:32,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999912888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:32,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999912892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:32,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:32,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999912919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:32,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:32,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999912919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:32,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:32,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999912919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,010 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/5d4997ce690247fd8b5d44114b66ef70 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/5d4997ce690247fd8b5d44114b66ef70 2024-11-07T17:17:33,014 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/A of 1711199603e16c41a1a94c45a03f0bd8 into 5d4997ce690247fd8b5d44114b66ef70(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:33,014 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:33,014 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/A, priority=13, startTime=1730999852579; duration=0sec 2024-11-07T17:17:33,014 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:33,014 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:A 2024-11-07T17:17:33,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:33,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999913020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:33,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999913020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:33,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999913025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:33,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999913025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:33,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999913025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:33,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999913225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:33,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999913225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:33,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999913233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:33,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999913233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:33,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999913233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,276 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/e494891e80bc4ea38c12fcc59b634d43 2024-11-07T17:17:33,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/aebe49c845bb4481a1c77f3414fe79e8 is 50, key is test_row_0/B:col10/1730999851745/Put/seqid=0 2024-11-07T17:17:33,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742379_1555 (size=12301) 2024-11-07T17:17:33,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:33,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999913529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:33,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999913532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:33,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999913539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-07T17:17:33,544 INFO [Thread-2062 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-07T17:17:33,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:33,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999913541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:33,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999913541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,545 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:17:33,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-11-07T17:17:33,546 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:17:33,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-07T17:17:33,547 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:17:33,547 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:17:33,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-07T17:17:33,686 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/aebe49c845bb4481a1c77f3414fe79e8 2024-11-07T17:17:33,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/ef60fd1a4e4641d6ab5e769dd3279f82 is 50, key is test_row_0/C:col10/1730999851745/Put/seqid=0 2024-11-07T17:17:33,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742380_1556 (size=12301) 2024-11-07T17:17:33,698 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=450 (bloomFilter=true), 
to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/ef60fd1a4e4641d6ab5e769dd3279f82 2024-11-07T17:17:33,698 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-07T17:17:33,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:33,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:33,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:33,699 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:33,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:33,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:33,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/e494891e80bc4ea38c12fcc59b634d43 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e494891e80bc4ea38c12fcc59b634d43 2024-11-07T17:17:33,706 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e494891e80bc4ea38c12fcc59b634d43, entries=250, sequenceid=450, filesize=16.8 K 2024-11-07T17:17:33,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/aebe49c845bb4481a1c77f3414fe79e8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/aebe49c845bb4481a1c77f3414fe79e8 2024-11-07T17:17:33,710 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/aebe49c845bb4481a1c77f3414fe79e8, entries=150, sequenceid=450, filesize=12.0 K 2024-11-07T17:17:33,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/ef60fd1a4e4641d6ab5e769dd3279f82 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/ef60fd1a4e4641d6ab5e769dd3279f82 2024-11-07T17:17:33,713 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/ef60fd1a4e4641d6ab5e769dd3279f82, entries=150, sequenceid=450, filesize=12.0 K 2024-11-07T17:17:33,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 1711199603e16c41a1a94c45a03f0bd8 in 848ms, sequenceid=450, compaction requested=false 2024-11-07T17:17:33,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:33,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-07T17:17:33,850 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:33,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-07T17:17:33,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:33,851 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-07T17:17:33,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:33,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:33,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:33,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:33,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:33,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:33,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/ba27f2ebd9434034822333c3d55cc61f is 50, key is test_row_0/A:col10/1730999852897/Put/seqid=0 2024-11-07T17:17:33,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742381_1557 (size=12301) 2024-11-07T17:17:34,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:34,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:34,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999914052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999914053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999914057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999914059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999914060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-07T17:17:34,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999914161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999914162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999914162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999914166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999914167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,259 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/ba27f2ebd9434034822333c3d55cc61f 2024-11-07T17:17:34,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/39bc29163db74d95b6e14b0be6347eb1 is 50, key is test_row_0/B:col10/1730999852897/Put/seqid=0 2024-11-07T17:17:34,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742382_1558 (size=12301) 2024-11-07T17:17:34,275 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/39bc29163db74d95b6e14b0be6347eb1 2024-11-07T17:17:34,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/22697fb571b14cf783e32a50fe785eaa is 50, key is 
test_row_0/C:col10/1730999852897/Put/seqid=0 2024-11-07T17:17:34,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742383_1559 (size=12301) 2024-11-07T17:17:34,283 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/22697fb571b14cf783e32a50fe785eaa 2024-11-07T17:17:34,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/ba27f2ebd9434034822333c3d55cc61f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/ba27f2ebd9434034822333c3d55cc61f 2024-11-07T17:17:34,289 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/ba27f2ebd9434034822333c3d55cc61f, entries=150, sequenceid=472, filesize=12.0 K 2024-11-07T17:17:34,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/39bc29163db74d95b6e14b0be6347eb1 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/39bc29163db74d95b6e14b0be6347eb1 2024-11-07T17:17:34,294 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/39bc29163db74d95b6e14b0be6347eb1, entries=150, sequenceid=472, filesize=12.0 K 2024-11-07T17:17:34,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/22697fb571b14cf783e32a50fe785eaa as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/22697fb571b14cf783e32a50fe785eaa 2024-11-07T17:17:34,297 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/22697fb571b14cf783e32a50fe785eaa, entries=150, sequenceid=472, filesize=12.0 K 2024-11-07T17:17:34,298 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 
{event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 1711199603e16c41a1a94c45a03f0bd8 in 448ms, sequenceid=472, compaction requested=true 2024-11-07T17:17:34,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:34,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:34,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-11-07T17:17:34,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-11-07T17:17:34,300 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-07T17:17:34,300 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 752 msec 2024-11-07T17:17:34,301 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 756 msec 2024-11-07T17:17:34,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:34,369 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-07T17:17:34,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:34,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:34,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:34,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:34,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:34,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:34,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/498147a43f3348d18e7c0e928056ec01 is 50, key is test_row_0/A:col10/1730999854368/Put/seqid=0 2024-11-07T17:17:34,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742384_1560 (size=14741) 2024-11-07T17:17:34,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999914397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999914398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999914401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999914401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999914402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999914505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999914506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999914508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999914508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999914508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-07T17:17:34,649 INFO [Thread-2062 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-07T17:17:34,650 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:17:34,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-11-07T17:17:34,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-07T17:17:34,652 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:17:34,652 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:17:34,652 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:17:34,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999914711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999914711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999914713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999914713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:34,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999914714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-07T17:17:34,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/498147a43f3348d18e7c0e928056ec01 2024-11-07T17:17:34,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/20e006a7d8284bd3b569159331e801eb is 50, key is test_row_0/B:col10/1730999854368/Put/seqid=0 2024-11-07T17:17:34,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742385_1561 (size=12301) 2024-11-07T17:17:34,804 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-07T17:17:34,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:34,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:34,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:34,804 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:34,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:34,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:34,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-07T17:17:34,956 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:34,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-07T17:17:34,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:34,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:34,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:34,957 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:34,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:34,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:35,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:35,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999915015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:35,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:35,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999915015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:35,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:35,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999915018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:35,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:35,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999915019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:35,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:35,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999915020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:35,109 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:35,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-07T17:17:35,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:35,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:35,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:35,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:35,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:35,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:35,187 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/20e006a7d8284bd3b569159331e801eb 2024-11-07T17:17:35,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/0442e9b095594392b4f3650d0aeed13d is 50, key is test_row_0/C:col10/1730999854368/Put/seqid=0 2024-11-07T17:17:35,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742386_1562 (size=12301) 2024-11-07T17:17:35,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-07T17:17:35,261 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:35,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-07T17:17:35,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:35,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
as already flushing 2024-11-07T17:17:35,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:35,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:35,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:35,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:35,414 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:35,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-07T17:17:35,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:35,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:35,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:35,415 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:35,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:35,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:35,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:35,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59930 deadline: 1730999915521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:35,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:35,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59916 deadline: 1730999915522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:35,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:35,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59874 deadline: 1730999915530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:35,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:35,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59942 deadline: 1730999915530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:35,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:35,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59900 deadline: 1730999915530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:35,566 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:35,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-07T17:17:35,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:35,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:35,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:35,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:35,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:35,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:35,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/0442e9b095594392b4f3650d0aeed13d 2024-11-07T17:17:35,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/498147a43f3348d18e7c0e928056ec01 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/498147a43f3348d18e7c0e928056ec01 2024-11-07T17:17:35,605 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/498147a43f3348d18e7c0e928056ec01, entries=200, sequenceid=488, filesize=14.4 K 2024-11-07T17:17:35,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/20e006a7d8284bd3b569159331e801eb as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/20e006a7d8284bd3b569159331e801eb 2024-11-07T17:17:35,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/20e006a7d8284bd3b569159331e801eb, entries=150, 
sequenceid=488, filesize=12.0 K 2024-11-07T17:17:35,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/0442e9b095594392b4f3650d0aeed13d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/0442e9b095594392b4f3650d0aeed13d 2024-11-07T17:17:35,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/0442e9b095594392b4f3650d0aeed13d, entries=150, sequenceid=488, filesize=12.0 K 2024-11-07T17:17:35,613 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 1711199603e16c41a1a94c45a03f0bd8 in 1244ms, sequenceid=488, compaction requested=true 2024-11-07T17:17:35,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:35,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:35,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:35,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:17:35,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:35,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1711199603e16c41a1a94c45a03f0bd8:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:35,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:35,613 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:17:35,613 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:17:35,614 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57546 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:17:35,614 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:17:35,614 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] 
regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/A is initiating minor compaction (all files) 2024-11-07T17:17:35,614 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/B is initiating minor compaction (all files) 2024-11-07T17:17:35,614 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/A in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:35,614 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/B in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:35,614 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/5d4997ce690247fd8b5d44114b66ef70, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e494891e80bc4ea38c12fcc59b634d43, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/ba27f2ebd9434034822333c3d55cc61f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/498147a43f3348d18e7c0e928056ec01] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=56.2 K 2024-11-07T17:17:35,614 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2618cb46ff1478283fc25c54821c0e1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/aebe49c845bb4481a1c77f3414fe79e8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/39bc29163db74d95b6e14b0be6347eb1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/20e006a7d8284bd3b569159331e801eb] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=49.0 K 2024-11-07T17:17:35,615 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d4997ce690247fd8b5d44114b66ef70, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1730999850608 2024-11-07T17:17:35,615 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting b2618cb46ff1478283fc25c54821c0e1, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1730999850608 2024-11-07T17:17:35,615 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting aebe49c845bb4481a1c77f3414fe79e8, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1730999851736 2024-11-07T17:17:35,615 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting e494891e80bc4ea38c12fcc59b634d43, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1730999851736 2024-11-07T17:17:35,616 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 39bc29163db74d95b6e14b0be6347eb1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1730999852885 2024-11-07T17:17:35,616 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba27f2ebd9434034822333c3d55cc61f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1730999852885 2024-11-07T17:17:35,616 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 498147a43f3348d18e7c0e928056ec01, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1730999854042 2024-11-07T17:17:35,616 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 20e006a7d8284bd3b569159331e801eb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1730999854058 2024-11-07T17:17:35,622 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#B#compaction#480 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:35,623 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/17a0ac229f814cb78d173e04597d4844 is 50, key is test_row_0/B:col10/1730999854368/Put/seqid=0 2024-11-07T17:17:35,626 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#A#compaction#481 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:35,627 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/0a42420ed3b74c5483ac7db126a4150d is 50, key is test_row_0/A:col10/1730999854368/Put/seqid=0 2024-11-07T17:17:35,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742387_1563 (size=13459) 2024-11-07T17:17:35,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742388_1564 (size=13459) 2024-11-07T17:17:35,683 DEBUG [Thread-2067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x008a917b to 127.0.0.1:64938 2024-11-07T17:17:35,683 DEBUG [Thread-2067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:35,684 DEBUG [Thread-2071 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79d49886 to 127.0.0.1:64938 2024-11-07T17:17:35,684 DEBUG [Thread-2071 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:35,684 DEBUG [Thread-2063 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7861b162 to 127.0.0.1:64938 2024-11-07T17:17:35,684 DEBUG [Thread-2063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:35,688 DEBUG [Thread-2069 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x054c2725 to 127.0.0.1:64938 2024-11-07T17:17:35,688 DEBUG [Thread-2069 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:35,689 DEBUG [Thread-2065 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x154f0f85 to 127.0.0.1:64938 2024-11-07T17:17:35,689 DEBUG [Thread-2065 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:35,719 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:35,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-07T17:17:35,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:35,719 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-07T17:17:35,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:35,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:35,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:35,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:35,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:35,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:35,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/85b197c2747d4d03bcd24316b6812ab5 is 50, key is test_row_0/A:col10/1730999854397/Put/seqid=0 2024-11-07T17:17:35,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742389_1565 (size=12301) 2024-11-07T17:17:35,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-07T17:17:36,032 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/17a0ac229f814cb78d173e04597d4844 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/17a0ac229f814cb78d173e04597d4844 2024-11-07T17:17:36,034 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/0a42420ed3b74c5483ac7db126a4150d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/0a42420ed3b74c5483ac7db126a4150d 2024-11-07T17:17:36,036 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/B of 1711199603e16c41a1a94c45a03f0bd8 into 
17a0ac229f814cb78d173e04597d4844(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:36,036 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:36,036 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/B, priority=12, startTime=1730999855613; duration=0sec 2024-11-07T17:17:36,036 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:36,036 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:B 2024-11-07T17:17:36,036 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:17:36,037 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:17:36,037 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 1711199603e16c41a1a94c45a03f0bd8/C is initiating minor compaction (all files) 2024-11-07T17:17:36,037 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/A of 1711199603e16c41a1a94c45a03f0bd8 into 0a42420ed3b74c5483ac7db126a4150d(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:36,037 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:36,037 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1711199603e16c41a1a94c45a03f0bd8/C in TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:36,037 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/A, priority=12, startTime=1730999855613; duration=0sec 2024-11-07T17:17:36,037 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:36,037 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:A 2024-11-07T17:17:36,037 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/166b84f9a95a40348e08fd53f9710aef, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/ef60fd1a4e4641d6ab5e769dd3279f82, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/22697fb571b14cf783e32a50fe785eaa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/0442e9b095594392b4f3650d0aeed13d] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp, totalSize=49.0 K 2024-11-07T17:17:36,037 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 166b84f9a95a40348e08fd53f9710aef, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1730999850608 2024-11-07T17:17:36,038 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting ef60fd1a4e4641d6ab5e769dd3279f82, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1730999851736 2024-11-07T17:17:36,038 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 22697fb571b14cf783e32a50fe785eaa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1730999852885 2024-11-07T17:17:36,038 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 0442e9b095594392b4f3650d0aeed13d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1730999854058 2024-11-07T17:17:36,043 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1711199603e16c41a1a94c45a03f0bd8#C#compaction#483 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:36,044 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/cdb77bcc8d56412ea0adbdb89c7de3c5 is 50, key is test_row_0/C:col10/1730999854368/Put/seqid=0 2024-11-07T17:17:36,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742390_1566 (size=13459) 2024-11-07T17:17:36,126 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=509 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/85b197c2747d4d03bcd24316b6812ab5 2024-11-07T17:17:36,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/4aa369e721cd44a1ac8ea6c5e67e3d77 is 50, key is test_row_0/B:col10/1730999854397/Put/seqid=0 2024-11-07T17:17:36,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742391_1567 (size=12301) 2024-11-07T17:17:36,450 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/cdb77bcc8d56412ea0adbdb89c7de3c5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/cdb77bcc8d56412ea0adbdb89c7de3c5 2024-11-07T17:17:36,454 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1711199603e16c41a1a94c45a03f0bd8/C of 1711199603e16c41a1a94c45a03f0bd8 into cdb77bcc8d56412ea0adbdb89c7de3c5(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:36,454 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:36,454 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8., storeName=1711199603e16c41a1a94c45a03f0bd8/C, priority=12, startTime=1730999855613; duration=0sec 2024-11-07T17:17:36,454 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:36,454 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1711199603e16c41a1a94c45a03f0bd8:C 2024-11-07T17:17:36,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:36,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. as already flushing 2024-11-07T17:17:36,527 DEBUG [Thread-2052 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ef40578 to 127.0.0.1:64938 2024-11-07T17:17:36,527 DEBUG [Thread-2052 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:36,534 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=509 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/4aa369e721cd44a1ac8ea6c5e67e3d77 2024-11-07T17:17:36,536 DEBUG [Thread-2060 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bc486e1 to 127.0.0.1:64938 2024-11-07T17:17:36,537 DEBUG [Thread-2060 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:36,538 DEBUG [Thread-2056 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06bc0f7c to 127.0.0.1:64938 2024-11-07T17:17:36,538 DEBUG [Thread-2056 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:36,540 DEBUG [Thread-2058 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b8b6e04 to 127.0.0.1:64938 2024-11-07T17:17:36,540 DEBUG [Thread-2058 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:36,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/ef7cfb5fd6b24cd98dba2a9318cd79d3 is 50, key is test_row_0/C:col10/1730999854397/Put/seqid=0 2024-11-07T17:17:36,542 DEBUG [Thread-2054 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x032bb71c to 127.0.0.1:64938 2024-11-07T17:17:36,542 DEBUG [Thread-2054 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:36,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742392_1568 (size=12301) 2024-11-07T17:17:36,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): 
Checking to see if procedure is done pid=148 2024-11-07T17:17:36,944 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=509 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/ef7cfb5fd6b24cd98dba2a9318cd79d3 2024-11-07T17:17:36,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/85b197c2747d4d03bcd24316b6812ab5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/85b197c2747d4d03bcd24316b6812ab5 2024-11-07T17:17:36,949 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/85b197c2747d4d03bcd24316b6812ab5, entries=150, sequenceid=509, filesize=12.0 K 2024-11-07T17:17:36,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/4aa369e721cd44a1ac8ea6c5e67e3d77 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/4aa369e721cd44a1ac8ea6c5e67e3d77 2024-11-07T17:17:36,952 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/4aa369e721cd44a1ac8ea6c5e67e3d77, entries=150, sequenceid=509, filesize=12.0 K 2024-11-07T17:17:36,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/ef7cfb5fd6b24cd98dba2a9318cd79d3 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/ef7cfb5fd6b24cd98dba2a9318cd79d3 2024-11-07T17:17:36,955 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/ef7cfb5fd6b24cd98dba2a9318cd79d3, entries=150, sequenceid=509, filesize=12.0 K 2024-11-07T17:17:36,956 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=33.54 KB/34350 for 
1711199603e16c41a1a94c45a03f0bd8 in 1236ms, sequenceid=509, compaction requested=false 2024-11-07T17:17:36,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:36,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:36,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-07T17:17:36,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-11-07T17:17:36,957 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-07T17:17:36,957 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3040 sec 2024-11-07T17:17:36,958 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 2.3080 sec 2024-11-07T17:17:38,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-07T17:17:38,755 INFO [Thread-2062 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 79 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 79 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 97 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2341 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7023 rows 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2340 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7020 rows 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2336 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7008 rows 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2336 2024-11-07T17:17:38,755 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7002 rows 2024-11-07T17:17:38,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2347 2024-11-07T17:17:38,756 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7041 rows 2024-11-07T17:17:38,756 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-07T17:17:38,756 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32c12a30 to 127.0.0.1:64938 2024-11-07T17:17:38,756 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:17:38,757 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-07T17:17:38,758 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-07T17:17:38,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:38,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-07T17:17:38,761 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999858761"}]},"ts":"1730999858761"} 2024-11-07T17:17:38,762 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-07T17:17:38,765 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-07T17:17:38,766 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T17:17:38,767 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1711199603e16c41a1a94c45a03f0bd8, UNASSIGN}] 2024-11-07T17:17:38,767 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1711199603e16c41a1a94c45a03f0bd8, UNASSIGN 2024-11-07T17:17:38,768 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=1711199603e16c41a1a94c45a03f0bd8, regionState=CLOSING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:38,768 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T17:17:38,768 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; CloseRegionProcedure 1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:17:38,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-07T17:17:38,920 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:38,920 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(124): Close 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:38,920 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T17:17:38,920 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1681): Closing 1711199603e16c41a1a94c45a03f0bd8, disabling compactions & flushes 2024-11-07T17:17:38,920 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:38,920 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 2024-11-07T17:17:38,920 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. after waiting 0 ms 2024-11-07T17:17:38,920 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:38,920 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(2837): Flushing 1711199603e16c41a1a94c45a03f0bd8 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-07T17:17:38,920 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=A 2024-11-07T17:17:38,921 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:38,921 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=B 2024-11-07T17:17:38,921 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:38,921 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1711199603e16c41a1a94c45a03f0bd8, store=C 2024-11-07T17:17:38,921 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:38,924 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/03de630ee7f7479faf329e5ccf14bac9 is 50, key is test_row_0/A:col10/1730999856539/Put/seqid=0 2024-11-07T17:17:38,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742393_1569 (size=12301) 2024-11-07T17:17:39,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-07T17:17:39,327 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=520 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/03de630ee7f7479faf329e5ccf14bac9 2024-11-07T17:17:39,332 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/d8fc460a4aa7430ca45a40f46c99d537 is 50, key is test_row_0/B:col10/1730999856539/Put/seqid=0 2024-11-07T17:17:39,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742394_1570 (size=12301) 2024-11-07T17:17:39,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-07T17:17:39,736 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 
{event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=520 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/d8fc460a4aa7430ca45a40f46c99d537 2024-11-07T17:17:39,741 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/8a13916ca0a0443cbf95e6bcb87b5361 is 50, key is test_row_0/C:col10/1730999856539/Put/seqid=0 2024-11-07T17:17:39,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742395_1571 (size=12301) 2024-11-07T17:17:39,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-07T17:17:40,144 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=520 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/8a13916ca0a0443cbf95e6bcb87b5361 2024-11-07T17:17:40,148 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/A/03de630ee7f7479faf329e5ccf14bac9 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/03de630ee7f7479faf329e5ccf14bac9 2024-11-07T17:17:40,150 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/03de630ee7f7479faf329e5ccf14bac9, entries=150, sequenceid=520, filesize=12.0 K 2024-11-07T17:17:40,151 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/B/d8fc460a4aa7430ca45a40f46c99d537 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/d8fc460a4aa7430ca45a40f46c99d537 2024-11-07T17:17:40,153 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/d8fc460a4aa7430ca45a40f46c99d537, entries=150, sequenceid=520, filesize=12.0 K 2024-11-07T17:17:40,154 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/.tmp/C/8a13916ca0a0443cbf95e6bcb87b5361 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/8a13916ca0a0443cbf95e6bcb87b5361 2024-11-07T17:17:40,156 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/8a13916ca0a0443cbf95e6bcb87b5361, entries=150, sequenceid=520, filesize=12.0 K 2024-11-07T17:17:40,157 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 1711199603e16c41a1a94c45a03f0bd8 in 1237ms, sequenceid=520, compaction requested=true 2024-11-07T17:17:40,158 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/92a3811c9e35457c895d2a4a9dfd4df5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/67d304c9e4fc404f8c56d9d1e64a4801, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c40aa37913b74728ac6b733cef9acaa5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e8b1ad029bae41ba87a4be65ebd7ad8c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/d5b1dae73b82402e8cbd61052e6a80eb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/b601482b6176432882ee58487636d728, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/f300ce89b09f4afcb08e69edf3f01c9f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/8ffc9ec9e1e54634a92cce5162000a09, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/882294ca131045c1af89b82f26859aa2, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/678160f0848740d49b510f5b15475824, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c83d9c4c1ed841c2984bfca621774890, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c88e8494d447483fbf51fa2a36019df7, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/02cc3a79a3a24947af444b62047901f5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/af82db6449504abbbb01fa961ad57ca7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/d2e1bb4e50d3479eafae3e8a7cfb33e9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/4da63d1441ac475a829eb83f61e8207e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/420083e621094671bcdc85451e218d03, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/9fd9560861394bada3a51f4d9c44c9da, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/b09e0b2803ec44bdad85100a5cb1e9f6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/81d9f3f8c1e047f7b96f3331ca1d0fdd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e9564afe4c63460d8946c9911fa79dea, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e2b33c250e194ece9ee47b461ede22fa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/5d5d4d1b2fac49f39f133f4685bbeddb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/f520ea0e775547e0b560a515c41b68a7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/bd5253c83a344c80a0d40e12d0296619, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/9c8c64f14d1e46f599d1f8012ea7c0ab, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/866a4d80fa154e8ab8584bbe8beec7aa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/82bf24badb4d474a80495c38745406ab, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/314aec1d704b46bdbd90fe6c97ca82ab, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/ea2eed37da1649eeac6b0be5f15644f1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/5d4997ce690247fd8b5d44114b66ef70, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e494891e80bc4ea38c12fcc59b634d43, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/ba27f2ebd9434034822333c3d55cc61f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/498147a43f3348d18e7c0e928056ec01] to archive 2024-11-07T17:17:40,158 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T17:17:40,160 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/92a3811c9e35457c895d2a4a9dfd4df5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/92a3811c9e35457c895d2a4a9dfd4df5 2024-11-07T17:17:40,161 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/67d304c9e4fc404f8c56d9d1e64a4801 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/67d304c9e4fc404f8c56d9d1e64a4801 2024-11-07T17:17:40,161 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c40aa37913b74728ac6b733cef9acaa5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c40aa37913b74728ac6b733cef9acaa5 2024-11-07T17:17:40,162 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e8b1ad029bae41ba87a4be65ebd7ad8c to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e8b1ad029bae41ba87a4be65ebd7ad8c 2024-11-07T17:17:40,163 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/d5b1dae73b82402e8cbd61052e6a80eb to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/d5b1dae73b82402e8cbd61052e6a80eb 2024-11-07T17:17:40,164 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/b601482b6176432882ee58487636d728 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/b601482b6176432882ee58487636d728 2024-11-07T17:17:40,165 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/f300ce89b09f4afcb08e69edf3f01c9f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/f300ce89b09f4afcb08e69edf3f01c9f 2024-11-07T17:17:40,165 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/8ffc9ec9e1e54634a92cce5162000a09 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/8ffc9ec9e1e54634a92cce5162000a09 2024-11-07T17:17:40,166 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/882294ca131045c1af89b82f26859aa2 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/882294ca131045c1af89b82f26859aa2 2024-11-07T17:17:40,167 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/678160f0848740d49b510f5b15475824 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/678160f0848740d49b510f5b15475824 2024-11-07T17:17:40,168 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c83d9c4c1ed841c2984bfca621774890 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c83d9c4c1ed841c2984bfca621774890 2024-11-07T17:17:40,168 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c88e8494d447483fbf51fa2a36019df7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/c88e8494d447483fbf51fa2a36019df7 2024-11-07T17:17:40,169 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/02cc3a79a3a24947af444b62047901f5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/02cc3a79a3a24947af444b62047901f5 2024-11-07T17:17:40,170 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/af82db6449504abbbb01fa961ad57ca7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/af82db6449504abbbb01fa961ad57ca7 2024-11-07T17:17:40,171 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/d2e1bb4e50d3479eafae3e8a7cfb33e9 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/d2e1bb4e50d3479eafae3e8a7cfb33e9 2024-11-07T17:17:40,172 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/4da63d1441ac475a829eb83f61e8207e to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/4da63d1441ac475a829eb83f61e8207e 2024-11-07T17:17:40,172 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/420083e621094671bcdc85451e218d03 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/420083e621094671bcdc85451e218d03 2024-11-07T17:17:40,173 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/9fd9560861394bada3a51f4d9c44c9da to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/9fd9560861394bada3a51f4d9c44c9da 2024-11-07T17:17:40,174 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/b09e0b2803ec44bdad85100a5cb1e9f6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/b09e0b2803ec44bdad85100a5cb1e9f6 2024-11-07T17:17:40,175 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/81d9f3f8c1e047f7b96f3331ca1d0fdd to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/81d9f3f8c1e047f7b96f3331ca1d0fdd 2024-11-07T17:17:40,175 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e9564afe4c63460d8946c9911fa79dea to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e9564afe4c63460d8946c9911fa79dea 2024-11-07T17:17:40,176 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e2b33c250e194ece9ee47b461ede22fa to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e2b33c250e194ece9ee47b461ede22fa 2024-11-07T17:17:40,177 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/5d5d4d1b2fac49f39f133f4685bbeddb to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/5d5d4d1b2fac49f39f133f4685bbeddb 2024-11-07T17:17:40,178 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/f520ea0e775547e0b560a515c41b68a7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/f520ea0e775547e0b560a515c41b68a7 2024-11-07T17:17:40,178 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/bd5253c83a344c80a0d40e12d0296619 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/bd5253c83a344c80a0d40e12d0296619 2024-11-07T17:17:40,179 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/9c8c64f14d1e46f599d1f8012ea7c0ab to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/9c8c64f14d1e46f599d1f8012ea7c0ab 2024-11-07T17:17:40,180 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/866a4d80fa154e8ab8584bbe8beec7aa to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/866a4d80fa154e8ab8584bbe8beec7aa 2024-11-07T17:17:40,181 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/82bf24badb4d474a80495c38745406ab to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/82bf24badb4d474a80495c38745406ab 2024-11-07T17:17:40,182 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/314aec1d704b46bdbd90fe6c97ca82ab to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/314aec1d704b46bdbd90fe6c97ca82ab 2024-11-07T17:17:40,182 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/ea2eed37da1649eeac6b0be5f15644f1 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/ea2eed37da1649eeac6b0be5f15644f1 2024-11-07T17:17:40,183 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/5d4997ce690247fd8b5d44114b66ef70 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/5d4997ce690247fd8b5d44114b66ef70 2024-11-07T17:17:40,184 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e494891e80bc4ea38c12fcc59b634d43 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/e494891e80bc4ea38c12fcc59b634d43 2024-11-07T17:17:40,185 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/ba27f2ebd9434034822333c3d55cc61f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/ba27f2ebd9434034822333c3d55cc61f 2024-11-07T17:17:40,186 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/498147a43f3348d18e7c0e928056ec01 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/498147a43f3348d18e7c0e928056ec01 2024-11-07T17:17:40,187 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/478d5dd5a81843be877947d981f13212, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/33208c211b654ff48ca4ca5f1f26c180, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/14839db64b1d4710b00c798aad8cd655, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/d875f57a28ff402fa513fbe2e0029a17, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/fbd8f591bb5b4ec3b25d2d4a77cae75f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/145c1a8ec862405b9d61c0e0e1812baf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/67afb6b9fd894dcf8c668f7ec7ff8567, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/c2b432aeabbb4fa187da953c15702736, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/4f8e625ef25743cb898c76df4bfdb018, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/60065c8946fa4cdfa3d9b05fa6d60fb1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1810f0b142f2429cb0b0ea1959773e38, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/8e2abb0c419647c4aa2bba3b621d5830, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1d13508373bf415ab00b59945d92134d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/88872c1a058f40e39e000acc8b604d19, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/34a9d3ace33c48aaa6ab7199072448e1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/c1cd030f1eef4d1c85cdc5e0626355f6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/f16848d12ee94178b63fe65fe308aa94, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/32999c953e514f0aa0519347724a3172, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/5eb5fb214b814727bc330325486cb8b1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/8ea41b0e122943e2b7b815c17dd1db82, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/eb0cdcde49d64bbca96e0692e79acc44, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/ea8d2436ff9d43b48841ce5686f0f46a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b865835dbc664dbf98afcc79332e9644, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/2b9316d3d2cc40f2ac358b6aa6179743, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1084fa1dccf24917afd970e4443886f8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2d1a7f4370b458ca60ee88bb3a7db8d, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/d9a203faed354cbc9132b21a4d17a288, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/5779f878ce684ecf964b8c9430245e35, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2333a46df17419a86188c2c2b23d546, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2618cb46ff1478283fc25c54821c0e1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/56d73be42bfd4fbfa5a65557d994ad0a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/aebe49c845bb4481a1c77f3414fe79e8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/39bc29163db74d95b6e14b0be6347eb1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/20e006a7d8284bd3b569159331e801eb] to archive 2024-11-07T17:17:40,188 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T17:17:40,189 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/478d5dd5a81843be877947d981f13212 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/478d5dd5a81843be877947d981f13212 2024-11-07T17:17:40,189 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/33208c211b654ff48ca4ca5f1f26c180 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/33208c211b654ff48ca4ca5f1f26c180 2024-11-07T17:17:40,190 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/14839db64b1d4710b00c798aad8cd655 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/14839db64b1d4710b00c798aad8cd655 2024-11-07T17:17:40,191 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/d875f57a28ff402fa513fbe2e0029a17 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/d875f57a28ff402fa513fbe2e0029a17 2024-11-07T17:17:40,191 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/fbd8f591bb5b4ec3b25d2d4a77cae75f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/fbd8f591bb5b4ec3b25d2d4a77cae75f 2024-11-07T17:17:40,192 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/145c1a8ec862405b9d61c0e0e1812baf to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/145c1a8ec862405b9d61c0e0e1812baf 2024-11-07T17:17:40,193 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/67afb6b9fd894dcf8c668f7ec7ff8567 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/67afb6b9fd894dcf8c668f7ec7ff8567 2024-11-07T17:17:40,194 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/c2b432aeabbb4fa187da953c15702736 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/c2b432aeabbb4fa187da953c15702736 2024-11-07T17:17:40,194 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/4f8e625ef25743cb898c76df4bfdb018 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/4f8e625ef25743cb898c76df4bfdb018 2024-11-07T17:17:40,195 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/60065c8946fa4cdfa3d9b05fa6d60fb1 to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/60065c8946fa4cdfa3d9b05fa6d60fb1 2024-11-07T17:17:40,196 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1810f0b142f2429cb0b0ea1959773e38 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1810f0b142f2429cb0b0ea1959773e38 2024-11-07T17:17:40,197 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/8e2abb0c419647c4aa2bba3b621d5830 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/8e2abb0c419647c4aa2bba3b621d5830 2024-11-07T17:17:40,198 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1d13508373bf415ab00b59945d92134d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1d13508373bf415ab00b59945d92134d 2024-11-07T17:17:40,198 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/88872c1a058f40e39e000acc8b604d19 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/88872c1a058f40e39e000acc8b604d19 2024-11-07T17:17:40,199 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/34a9d3ace33c48aaa6ab7199072448e1 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/34a9d3ace33c48aaa6ab7199072448e1 2024-11-07T17:17:40,200 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/c1cd030f1eef4d1c85cdc5e0626355f6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/c1cd030f1eef4d1c85cdc5e0626355f6 2024-11-07T17:17:40,200 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/f16848d12ee94178b63fe65fe308aa94 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/f16848d12ee94178b63fe65fe308aa94 2024-11-07T17:17:40,201 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/32999c953e514f0aa0519347724a3172 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/32999c953e514f0aa0519347724a3172 2024-11-07T17:17:40,202 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/5eb5fb214b814727bc330325486cb8b1 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/5eb5fb214b814727bc330325486cb8b1 2024-11-07T17:17:40,203 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/8ea41b0e122943e2b7b815c17dd1db82 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/8ea41b0e122943e2b7b815c17dd1db82 2024-11-07T17:17:40,203 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/eb0cdcde49d64bbca96e0692e79acc44 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/eb0cdcde49d64bbca96e0692e79acc44 2024-11-07T17:17:40,204 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/ea8d2436ff9d43b48841ce5686f0f46a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/ea8d2436ff9d43b48841ce5686f0f46a 2024-11-07T17:17:40,205 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b865835dbc664dbf98afcc79332e9644 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b865835dbc664dbf98afcc79332e9644 2024-11-07T17:17:40,206 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/2b9316d3d2cc40f2ac358b6aa6179743 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/2b9316d3d2cc40f2ac358b6aa6179743 2024-11-07T17:17:40,207 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1084fa1dccf24917afd970e4443886f8 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/1084fa1dccf24917afd970e4443886f8 2024-11-07T17:17:40,207 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2d1a7f4370b458ca60ee88bb3a7db8d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2d1a7f4370b458ca60ee88bb3a7db8d 2024-11-07T17:17:40,208 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/d9a203faed354cbc9132b21a4d17a288 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/d9a203faed354cbc9132b21a4d17a288 2024-11-07T17:17:40,209 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/5779f878ce684ecf964b8c9430245e35 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/5779f878ce684ecf964b8c9430245e35 2024-11-07T17:17:40,210 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2333a46df17419a86188c2c2b23d546 to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2333a46df17419a86188c2c2b23d546 2024-11-07T17:17:40,210 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2618cb46ff1478283fc25c54821c0e1 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/b2618cb46ff1478283fc25c54821c0e1 2024-11-07T17:17:40,211 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/56d73be42bfd4fbfa5a65557d994ad0a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/56d73be42bfd4fbfa5a65557d994ad0a 2024-11-07T17:17:40,212 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/aebe49c845bb4481a1c77f3414fe79e8 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/aebe49c845bb4481a1c77f3414fe79e8 2024-11-07T17:17:40,213 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/39bc29163db74d95b6e14b0be6347eb1 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/39bc29163db74d95b6e14b0be6347eb1 2024-11-07T17:17:40,213 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/20e006a7d8284bd3b569159331e801eb to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/20e006a7d8284bd3b569159331e801eb 2024-11-07T17:17:40,215 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/e0c5620d204149ca8eb3d57fdca4e000, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/1e431eacc4af400a969403a6a33d5eb8, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/39a9c95c7984497ca1949f033851468d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/9d32b7c2ff3845ceb0e0e70a3ed9befa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d5959533c3e143748842f992ae74d927, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/b277c85dcdf6453dba063dce4389ef33, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/0fb5a51e85bc443db71ec785d848535e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/74d518de429a49c7a9eca50a2507113a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/2b6d8a25006f44cbaf64460cbe4a0fb5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/2d0508665a7b4b64857aa53606d3f8eb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/cbe8226c3bb34326adc7dd40b52fcf67, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/fa7326d0388f4f3eae0c665b30af65f5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/e556da3538834772b2a46bc423f58b5c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/9c432c1ba3f2424c928c7407e7bc6e26, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/517de94153fb4e73bcf184e452c0b679, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/a56a8ed566b6498ca8f707340ba33dcb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/bc3bacfcc51640fc8e66ea7ab9b0a0d0, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/8f0a8d01eb2e4145bd134c6471cf0fcd, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/32bfbbb7576647a58ec5d405f3e5bf4b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/88b3edaadc0e4ead9819cafed2603c3d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/c78e6267159a48eca453104e7cd50e22, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/4e00d5bddeff420e901ec00a59cab2fa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/1e34c2b306ad4fdface66278e6cfe493, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/fb0a389d6a704275a372e9b5c232c7d1, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/a7aac47356b840adac08954d2024ade7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/c8d8d8f021aa410d9f26a22cb8b2e043, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/7c2968fd8875475ea1c5972483a09b04, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d3b6051b53a341cbb7451c3930d73ff5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d921778379db4eb88cda6fb325651595, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/166b84f9a95a40348e08fd53f9710aef, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/7ef57814e1984e12b7d0b11c19800074, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/ef60fd1a4e4641d6ab5e769dd3279f82, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/22697fb571b14cf783e32a50fe785eaa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/0442e9b095594392b4f3650d0aeed13d] to archive 2024-11-07T17:17:40,215 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T17:17:40,216 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/e0c5620d204149ca8eb3d57fdca4e000 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/e0c5620d204149ca8eb3d57fdca4e000 2024-11-07T17:17:40,217 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/1e431eacc4af400a969403a6a33d5eb8 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/1e431eacc4af400a969403a6a33d5eb8 2024-11-07T17:17:40,218 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/39a9c95c7984497ca1949f033851468d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/39a9c95c7984497ca1949f033851468d 2024-11-07T17:17:40,219 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/9d32b7c2ff3845ceb0e0e70a3ed9befa to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/9d32b7c2ff3845ceb0e0e70a3ed9befa 2024-11-07T17:17:40,220 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d5959533c3e143748842f992ae74d927 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d5959533c3e143748842f992ae74d927 2024-11-07T17:17:40,221 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/b277c85dcdf6453dba063dce4389ef33 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/b277c85dcdf6453dba063dce4389ef33 2024-11-07T17:17:40,222 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/0fb5a51e85bc443db71ec785d848535e to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/0fb5a51e85bc443db71ec785d848535e 2024-11-07T17:17:40,223 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/74d518de429a49c7a9eca50a2507113a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/74d518de429a49c7a9eca50a2507113a 2024-11-07T17:17:40,223 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/2b6d8a25006f44cbaf64460cbe4a0fb5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/2b6d8a25006f44cbaf64460cbe4a0fb5 2024-11-07T17:17:40,224 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/2d0508665a7b4b64857aa53606d3f8eb to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/2d0508665a7b4b64857aa53606d3f8eb 2024-11-07T17:17:40,225 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/cbe8226c3bb34326adc7dd40b52fcf67 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/cbe8226c3bb34326adc7dd40b52fcf67 2024-11-07T17:17:40,226 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/fa7326d0388f4f3eae0c665b30af65f5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/fa7326d0388f4f3eae0c665b30af65f5 2024-11-07T17:17:40,226 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/e556da3538834772b2a46bc423f58b5c to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/e556da3538834772b2a46bc423f58b5c 2024-11-07T17:17:40,227 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/9c432c1ba3f2424c928c7407e7bc6e26 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/9c432c1ba3f2424c928c7407e7bc6e26 2024-11-07T17:17:40,228 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/517de94153fb4e73bcf184e452c0b679 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/517de94153fb4e73bcf184e452c0b679 2024-11-07T17:17:40,228 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/a56a8ed566b6498ca8f707340ba33dcb to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/a56a8ed566b6498ca8f707340ba33dcb 2024-11-07T17:17:40,229 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/bc3bacfcc51640fc8e66ea7ab9b0a0d0 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/bc3bacfcc51640fc8e66ea7ab9b0a0d0 2024-11-07T17:17:40,230 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/8f0a8d01eb2e4145bd134c6471cf0fcd to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/8f0a8d01eb2e4145bd134c6471cf0fcd 2024-11-07T17:17:40,231 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/32bfbbb7576647a58ec5d405f3e5bf4b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/32bfbbb7576647a58ec5d405f3e5bf4b 2024-11-07T17:17:40,231 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/88b3edaadc0e4ead9819cafed2603c3d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/88b3edaadc0e4ead9819cafed2603c3d 2024-11-07T17:17:40,232 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/c78e6267159a48eca453104e7cd50e22 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/c78e6267159a48eca453104e7cd50e22 2024-11-07T17:17:40,233 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/4e00d5bddeff420e901ec00a59cab2fa to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/4e00d5bddeff420e901ec00a59cab2fa 2024-11-07T17:17:40,234 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/1e34c2b306ad4fdface66278e6cfe493 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/1e34c2b306ad4fdface66278e6cfe493 2024-11-07T17:17:40,235 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/fb0a389d6a704275a372e9b5c232c7d1 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/fb0a389d6a704275a372e9b5c232c7d1 2024-11-07T17:17:40,236 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/a7aac47356b840adac08954d2024ade7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/a7aac47356b840adac08954d2024ade7 2024-11-07T17:17:40,236 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/c8d8d8f021aa410d9f26a22cb8b2e043 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/c8d8d8f021aa410d9f26a22cb8b2e043 2024-11-07T17:17:40,237 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/7c2968fd8875475ea1c5972483a09b04 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/7c2968fd8875475ea1c5972483a09b04 2024-11-07T17:17:40,238 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d3b6051b53a341cbb7451c3930d73ff5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d3b6051b53a341cbb7451c3930d73ff5 2024-11-07T17:17:40,238 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d921778379db4eb88cda6fb325651595 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/d921778379db4eb88cda6fb325651595 2024-11-07T17:17:40,239 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/166b84f9a95a40348e08fd53f9710aef to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/166b84f9a95a40348e08fd53f9710aef 2024-11-07T17:17:40,240 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/7ef57814e1984e12b7d0b11c19800074 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/7ef57814e1984e12b7d0b11c19800074 2024-11-07T17:17:40,240 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/ef60fd1a4e4641d6ab5e769dd3279f82 to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/ef60fd1a4e4641d6ab5e769dd3279f82 2024-11-07T17:17:40,241 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/22697fb571b14cf783e32a50fe785eaa to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/22697fb571b14cf783e32a50fe785eaa 2024-11-07T17:17:40,242 DEBUG [StoreCloser-TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/0442e9b095594392b4f3650d0aeed13d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/0442e9b095594392b4f3650d0aeed13d 2024-11-07T17:17:40,245 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/recovered.edits/523.seqid, newMaxSeqId=523, maxSeqId=1 2024-11-07T17:17:40,245 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8. 
2024-11-07T17:17:40,246 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1635): Region close journal for 1711199603e16c41a1a94c45a03f0bd8: 2024-11-07T17:17:40,247 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(170): Closed 1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:40,247 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=1711199603e16c41a1a94c45a03f0bd8, regionState=CLOSED 2024-11-07T17:17:40,249 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-07T17:17:40,249 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; CloseRegionProcedure 1711199603e16c41a1a94c45a03f0bd8, server=3a0fde618c86,37403,1730999712734 in 1.4800 sec 2024-11-07T17:17:40,250 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-11-07T17:17:40,250 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1711199603e16c41a1a94c45a03f0bd8, UNASSIGN in 1.4820 sec 2024-11-07T17:17:40,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-07T17:17:40,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4840 sec 2024-11-07T17:17:40,252 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999860252"}]},"ts":"1730999860252"} 2024-11-07T17:17:40,252 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-07T17:17:40,254 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-07T17:17:40,255 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4970 sec 2024-11-07T17:17:40,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-07T17:17:40,864 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-11-07T17:17:40,864 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-07T17:17:40,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:40,866 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=154, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:40,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-07T17:17:40,866 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=154, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:40,868 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:40,869 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/recovered.edits] 2024-11-07T17:17:40,871 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/03de630ee7f7479faf329e5ccf14bac9 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/03de630ee7f7479faf329e5ccf14bac9 2024-11-07T17:17:40,872 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/0a42420ed3b74c5483ac7db126a4150d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/0a42420ed3b74c5483ac7db126a4150d 2024-11-07T17:17:40,873 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/85b197c2747d4d03bcd24316b6812ab5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/A/85b197c2747d4d03bcd24316b6812ab5 2024-11-07T17:17:40,874 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/17a0ac229f814cb78d173e04597d4844 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/17a0ac229f814cb78d173e04597d4844 2024-11-07T17:17:40,875 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/4aa369e721cd44a1ac8ea6c5e67e3d77 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/4aa369e721cd44a1ac8ea6c5e67e3d77 
2024-11-07T17:17:40,876 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/d8fc460a4aa7430ca45a40f46c99d537 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/B/d8fc460a4aa7430ca45a40f46c99d537 2024-11-07T17:17:40,877 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/8a13916ca0a0443cbf95e6bcb87b5361 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/8a13916ca0a0443cbf95e6bcb87b5361 2024-11-07T17:17:40,878 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/cdb77bcc8d56412ea0adbdb89c7de3c5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/cdb77bcc8d56412ea0adbdb89c7de3c5 2024-11-07T17:17:40,879 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/ef7cfb5fd6b24cd98dba2a9318cd79d3 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/C/ef7cfb5fd6b24cd98dba2a9318cd79d3 2024-11-07T17:17:40,881 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/recovered.edits/523.seqid to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8/recovered.edits/523.seqid 2024-11-07T17:17:40,881 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/1711199603e16c41a1a94c45a03f0bd8 2024-11-07T17:17:40,881 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-07T17:17:40,883 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=154, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:40,884 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-07T17:17:40,885 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
2024-11-07T17:17:40,886 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=154, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:40,886 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-07T17:17:40,886 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730999860886"}]},"ts":"9223372036854775807"} 2024-11-07T17:17:40,888 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-07T17:17:40,888 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 1711199603e16c41a1a94c45a03f0bd8, NAME => 'TestAcidGuarantees,,1730999833516.1711199603e16c41a1a94c45a03f0bd8.', STARTKEY => '', ENDKEY => ''}] 2024-11-07T17:17:40,888 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-07T17:17:40,888 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730999860888"}]},"ts":"9223372036854775807"} 2024-11-07T17:17:40,889 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-07T17:17:40,891 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=154, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:40,891 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 27 msec 2024-11-07T17:17:40,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-07T17:17:40,967 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-11-07T17:17:40,975 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=239 (was 235) - Thread LEAK? -, OpenFileDescriptor=451 (was 449) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=496 (was 537), ProcessCount=11 (was 11), AvailableMemoryMB=3542 (was 2713) - AvailableMemoryMB LEAK? - 2024-11-07T17:17:40,984 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=239, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=496, ProcessCount=11, AvailableMemoryMB=3542 2024-11-07T17:17:40,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-07T17:17:40,985 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T17:17:40,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:40,986 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T17:17:40,986 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:40,986 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 155 2024-11-07T17:17:40,987 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T17:17:40,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-07T17:17:40,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742396_1572 (size=960) 2024-11-07T17:17:41,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-07T17:17:41,142 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-07T17:17:41,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-07T17:17:41,395 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17 2024-11-07T17:17:41,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742397_1573 (size=53) 2024-11-07T17:17:41,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-07T17:17:41,800 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:17:41,800 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 990807c0d50040fb7da6789c8418caee, disabling compactions & flushes 2024-11-07T17:17:41,800 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:41,800 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:41,800 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. after waiting 0 ms 2024-11-07T17:17:41,800 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:41,800 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:41,800 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:41,801 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T17:17:41,801 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1730999861801"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730999861801"}]},"ts":"1730999861801"} 2024-11-07T17:17:41,802 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-07T17:17:41,803 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T17:17:41,803 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999861803"}]},"ts":"1730999861803"} 2024-11-07T17:17:41,803 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-07T17:17:41,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=990807c0d50040fb7da6789c8418caee, ASSIGN}] 2024-11-07T17:17:41,808 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=990807c0d50040fb7da6789c8418caee, ASSIGN 2024-11-07T17:17:41,808 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=990807c0d50040fb7da6789c8418caee, ASSIGN; state=OFFLINE, location=3a0fde618c86,37403,1730999712734; forceNewPlan=false, retain=false 2024-11-07T17:17:41,958 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=990807c0d50040fb7da6789c8418caee, regionState=OPENING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:41,960 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; OpenRegionProcedure 990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:17:42,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-07T17:17:42,111 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:42,114 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:42,114 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7285): Opening region: {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} 2024-11-07T17:17:42,114 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:42,114 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:17:42,114 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7327): checking encryption for 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:42,114 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7330): checking classloading for 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:42,116 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:42,117 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:17:42,117 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 990807c0d50040fb7da6789c8418caee columnFamilyName A 2024-11-07T17:17:42,117 DEBUG [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:42,117 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.HStore(327): Store=990807c0d50040fb7da6789c8418caee/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:17:42,117 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:42,118 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:17:42,119 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 990807c0d50040fb7da6789c8418caee columnFamilyName B 2024-11-07T17:17:42,119 DEBUG [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:42,119 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.HStore(327): Store=990807c0d50040fb7da6789c8418caee/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:17:42,119 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:42,120 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:17:42,120 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 990807c0d50040fb7da6789c8418caee columnFamilyName C 2024-11-07T17:17:42,120 DEBUG [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:42,120 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.HStore(327): Store=990807c0d50040fb7da6789c8418caee/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:17:42,121 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:42,121 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:42,122 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:42,123 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T17:17:42,124 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1085): writing seq id for 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:42,126 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T17:17:42,126 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1102): Opened 990807c0d50040fb7da6789c8418caee; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59764715, jitterRate=-0.10943634808063507}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T17:17:42,127 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1001): Region open journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:42,127 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., pid=157, masterSystemTime=1730999862111 2024-11-07T17:17:42,128 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:42,129 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:42,129 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=990807c0d50040fb7da6789c8418caee, regionState=OPEN, openSeqNum=2, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:42,131 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-11-07T17:17:42,131 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; OpenRegionProcedure 990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 in 170 msec 2024-11-07T17:17:42,132 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-11-07T17:17:42,132 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=990807c0d50040fb7da6789c8418caee, ASSIGN in 324 msec 2024-11-07T17:17:42,132 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T17:17:42,132 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999862132"}]},"ts":"1730999862132"} 2024-11-07T17:17:42,133 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-07T17:17:42,135 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T17:17:42,136 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1500 sec 2024-11-07T17:17:43,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-07T17:17:43,090 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-11-07T17:17:43,091 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d5efb7a to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@644b7e6 2024-11-07T17:17:43,098 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6094c70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:43,099 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:43,100 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51468, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:43,101 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T17:17:43,102 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56696, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T17:17:43,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-07T17:17:43,103 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T17:17:43,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-07T17:17:43,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742398_1574 (size=996) 2024-11-07T17:17:43,512 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-07T17:17:43,512 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-07T17:17:43,514 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T17:17:43,515 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=990807c0d50040fb7da6789c8418caee, REOPEN/MOVE}] 2024-11-07T17:17:43,516 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=990807c0d50040fb7da6789c8418caee, REOPEN/MOVE 2024-11-07T17:17:43,516 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=990807c0d50040fb7da6789c8418caee, regionState=CLOSING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:43,517 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T17:17:43,517 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE; CloseRegionProcedure 990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:17:43,523 ERROR [LeaseRenewer:jenkins@localhost:39903 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:39903,5,PEWorkerGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:43,668 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:43,669 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(124): Close 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:43,669 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T17:17:43,669 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1681): Closing 990807c0d50040fb7da6789c8418caee, disabling compactions & flushes 2024-11-07T17:17:43,669 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:43,669 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:43,669 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. after waiting 0 ms 2024-11-07T17:17:43,669 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:43,672 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-07T17:17:43,673 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:43,673 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1635): Region close journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:43,673 WARN [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionServer(3786): Not adding moved region record: 990807c0d50040fb7da6789c8418caee to self. 
2024-11-07T17:17:43,674 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(170): Closed 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:43,674 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=990807c0d50040fb7da6789c8418caee, regionState=CLOSED 2024-11-07T17:17:43,676 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-11-07T17:17:43,676 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; CloseRegionProcedure 990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 in 158 msec 2024-11-07T17:17:43,676 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=990807c0d50040fb7da6789c8418caee, REOPEN/MOVE; state=CLOSED, location=3a0fde618c86,37403,1730999712734; forceNewPlan=false, retain=true 2024-11-07T17:17:43,827 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=990807c0d50040fb7da6789c8418caee, regionState=OPENING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:43,828 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=160, state=RUNNABLE; OpenRegionProcedure 990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:17:43,979 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:43,981 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:43,981 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7285): Opening region: {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} 2024-11-07T17:17:43,982 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:43,982 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T17:17:43,982 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7327): checking encryption for 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:43,982 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7330): checking classloading for 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:43,983 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:43,983 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:17:43,984 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 990807c0d50040fb7da6789c8418caee columnFamilyName A 2024-11-07T17:17:43,985 DEBUG [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:43,985 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.HStore(327): Store=990807c0d50040fb7da6789c8418caee/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:17:43,985 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:43,986 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:17:43,986 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 990807c0d50040fb7da6789c8418caee columnFamilyName B 2024-11-07T17:17:43,986 DEBUG [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:43,986 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.HStore(327): Store=990807c0d50040fb7da6789c8418caee/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:17:43,986 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:43,987 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T17:17:43,987 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 990807c0d50040fb7da6789c8418caee columnFamilyName C 2024-11-07T17:17:43,987 DEBUG [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:43,987 INFO [StoreOpener-990807c0d50040fb7da6789c8418caee-1 {}] regionserver.HStore(327): Store=990807c0d50040fb7da6789c8418caee/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T17:17:43,987 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:43,988 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:43,989 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:43,990 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T17:17:43,991 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1085): writing seq id for 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:43,991 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1102): Opened 990807c0d50040fb7da6789c8418caee; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61895350, jitterRate=-0.07768741250038147}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T17:17:43,992 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1001): Region open journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:43,992 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., pid=162, masterSystemTime=1730999863979 2024-11-07T17:17:43,993 DEBUG [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:43,994 INFO [RS_OPEN_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:43,994 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=990807c0d50040fb7da6789c8418caee, regionState=OPEN, openSeqNum=5, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:43,996 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=160 2024-11-07T17:17:43,996 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=160, state=SUCCESS; OpenRegionProcedure 990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 in 167 msec 2024-11-07T17:17:43,997 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-07T17:17:43,997 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=990807c0d50040fb7da6789c8418caee, REOPEN/MOVE in 481 msec 2024-11-07T17:17:43,998 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-11-07T17:17:43,998 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 483 msec 2024-11-07T17:17:43,999 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 895 msec 2024-11-07T17:17:44,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-07T17:17:44,001 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7fc332d8 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c9b5141 2024-11-07T17:17:44,007 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@103dfc6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:44,008 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17327621 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11a52cdf 2024-11-07T17:17:44,013 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e047c09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:44,013 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1584f18a to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d7fe431 2024-11-07T17:17:44,016 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60d631a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:44,017 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x5b914bf4 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@91d72db 2024-11-07T17:17:44,022 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58971172, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:44,022 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f6a59e4 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5d836f78 2024-11-07T17:17:44,024 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d7fe93b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:44,025 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53305d9b to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11c440f7 2024-11-07T17:17:44,027 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f1754bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:44,028 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bb6288a to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58460ef3 2024-11-07T17:17:44,030 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d9113f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:44,031 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06556601 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e8cd1ae 2024-11-07T17:17:44,033 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bb75907, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:44,034 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x458a85fd to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d832d43 2024-11-07T17:17:44,037 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c1d3a95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:44,038 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x410bf0c8 to 127.0.0.1:64938 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@15b6349f 2024-11-07T17:17:44,040 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@503a7d2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T17:17:44,044 DEBUG [hconnection-0x7e20c0c6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:44,044 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:17:44,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-07T17:17:44,045 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51472, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:44,046 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:17:44,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-07T17:17:44,046 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:17:44,046 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:17:44,048 DEBUG [hconnection-0x76e2646b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:44,048 DEBUG [hconnection-0x267aff80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:44,049 DEBUG [hconnection-0x45a864aa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:44,049 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51474, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:44,049 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51484, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:44,050 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51496, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:44,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
regionserver.HRegion(8581): Flush requested on 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:44,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T17:17:44,054 DEBUG [hconnection-0x3f766969-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:44,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:17:44,054 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51506, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:44,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:44,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:17:44,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:44,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:17:44,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:44,064 DEBUG [hconnection-0x58681e90-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:44,065 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51516, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:44,065 DEBUG [hconnection-0x5905e3db-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:44,065 DEBUG [hconnection-0x40946f0b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:44,066 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51532, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:44,066 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51536, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:44,068 DEBUG [hconnection-0x7f457114-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:44,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999924067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999924067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,069 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51546, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:44,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999924068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999924070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,092 DEBUG [hconnection-0x26d3cf66-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T17:17:44,093 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51560, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T17:17:44,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999924094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,097 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c99c46c854c44bdf95280f4edd5675eb_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999864052/Put/seqid=0 2024-11-07T17:17:44,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742399_1575 (size=12154) 2024-11-07T17:17:44,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-07T17:17:44,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999924169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999924169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999924171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999924172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,197 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999924196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-07T17:17:44,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:44,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:44,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:44,198 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:44,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:44,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:44,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-07T17:17:44,350 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-07T17:17:44,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:44,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:44,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:44,350 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:44,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:44,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:44,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999924371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999924372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999924373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999924375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999924399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,502 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-07T17:17:44,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:44,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:44,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:44,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:44,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:44,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:44,510 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:44,513 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c99c46c854c44bdf95280f4edd5675eb_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c99c46c854c44bdf95280f4edd5675eb_990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:44,514 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/05f56f82a8b449bf93b0ff89ea2a48ab, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:44,515 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/05f56f82a8b449bf93b0ff89ea2a48ab is 175, key is test_row_0/A:col10/1730999864052/Put/seqid=0 2024-11-07T17:17:44,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742400_1576 (size=30955) 2024-11-07T17:17:44,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-07T17:17:44,655 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-07T17:17:44,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:44,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:44,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:44,655 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:44,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
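The RegionTooBusyException entries above ("Over memstore limit=512.0 K") are raised by HRegion.checkResources once a region's memstore passes its blocking threshold, which is the memstore flush size multiplied by the block multiplier. A minimal sketch of how a test could arrive at a 512 K blocking limit follows; the concrete values are assumptions chosen for illustration and are not taken from this run's configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreLimitSketch {
        // Builds a configuration whose blocking memstore limit works out to 512 KB:
        // flush size (128 KB) x block multiplier (4). Both values are illustrative assumptions.
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // flush once the memstore reaches 128 KB
            conf.setLong("hbase.hregion.memstore.block.multiplier", 4L);    // reject writes at 4x the flush size
            return conf;
        }
    }

With a limit this small, concurrent writers reach the blocking threshold long before a flush completes, which is why every Mutate call in this window is rejected rather than queued.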
2024-11-07T17:17:44,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:44,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999924674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999924675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999924676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999924676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:44,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999924702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,807 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-07T17:17:44,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:44,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:44,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:44,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:44,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:44,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:44,921 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/05f56f82a8b449bf93b0ff89ea2a48ab 2024-11-07T17:17:44,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/14354eb6130e433dae7aef827ee34558 is 50, key is test_row_0/B:col10/1730999864052/Put/seqid=0 2024-11-07T17:17:44,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742401_1577 (size=12001) 2024-11-07T17:17:44,950 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/14354eb6130e433dae7aef827ee34558 2024-11-07T17:17:44,960 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:44,960 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-07T17:17:44,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:44,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:44,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:44,961 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:44,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
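On the client side, RegionTooBusyException is an IOException subtype, so the writers in this test see it either directly or, depending on retry settings, wrapped by the client retry machinery. A hedged sketch of a writer that backs off when a put is rejected for this reason; the class name, row, value, and backoff figures are assumptions, only the table and column layout come from the log.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffWriterSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);
                        return; // write accepted
                    } catch (RegionTooBusyException e) {
                        // Region is over its memstore limit; wait for the flush to catch up.
                        Thread.sleep(200L * (attempt + 1));
                    }
                }
                throw new IOException("region stayed too busy after retries");
            }
        }
    }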
2024-11-07T17:17:44,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:44,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/fb35eecf00dc4a6ca5e9dafa8cb879ae is 50, key is test_row_0/C:col10/1730999864052/Put/seqid=0 2024-11-07T17:17:44,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742402_1578 (size=12001) 2024-11-07T17:17:45,112 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:45,113 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-07T17:17:45,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:45,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:45,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:45,113 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:45,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:45,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:45,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-07T17:17:45,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:45,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999925177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:45,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:45,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999925180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:45,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:45,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999925180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:45,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:45,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999925181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:45,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:45,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999925204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:45,265 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:45,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-07T17:17:45,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:45,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:45,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:45,266 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:45,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:45,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:45,374 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/fb35eecf00dc4a6ca5e9dafa8cb879ae 2024-11-07T17:17:45,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/05f56f82a8b449bf93b0ff89ea2a48ab as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/05f56f82a8b449bf93b0ff89ea2a48ab 2024-11-07T17:17:45,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/05f56f82a8b449bf93b0ff89ea2a48ab, entries=150, sequenceid=17, filesize=30.2 K 2024-11-07T17:17:45,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/14354eb6130e433dae7aef827ee34558 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/14354eb6130e433dae7aef827ee34558 2024-11-07T17:17:45,384 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/14354eb6130e433dae7aef827ee34558, entries=150, sequenceid=17, filesize=11.7 K 2024-11-07T17:17:45,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/fb35eecf00dc4a6ca5e9dafa8cb879ae as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/fb35eecf00dc4a6ca5e9dafa8cb879ae 2024-11-07T17:17:45,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/fb35eecf00dc4a6ca5e9dafa8cb879ae, entries=150, sequenceid=17, filesize=11.7 K 2024-11-07T17:17:45,388 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 990807c0d50040fb7da6789c8418caee in 1335ms, sequenceid=17, compaction requested=false 2024-11-07T17:17:45,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:45,417 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
3a0fde618c86,37403,1730999712734 2024-11-07T17:17:45,418 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-07T17:17:45,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:45,418 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T17:17:45,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:17:45,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:45,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:17:45,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:45,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:17:45,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:45,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110790f3b2a3efc44e67b2e0d27928eee36b_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999864064/Put/seqid=0 2024-11-07T17:17:45,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742403_1579 (size=12154) 2024-11-07T17:17:45,764 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-07T17:17:45,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:45,831 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110790f3b2a3efc44e67b2e0d27928eee36b_990807c0d50040fb7da6789c8418caee to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110790f3b2a3efc44e67b2e0d27928eee36b_990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:45,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/2eb889ca96324e4593f6c1c98d326c97, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:45,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/2eb889ca96324e4593f6c1c98d326c97 is 175, key is test_row_0/A:col10/1730999864064/Put/seqid=0 2024-11-07T17:17:45,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742404_1580 (size=30955) 2024-11-07T17:17:45,981 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:39903 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:39903,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:46,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-07T17:17:46,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:46,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:46,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999926191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999926192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999926193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999926194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999926212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,236 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/2eb889ca96324e4593f6c1c98d326c97 2024-11-07T17:17:46,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/0624822464e145c093dd368c8875594a is 50, key is test_row_0/B:col10/1730999864064/Put/seqid=0 2024-11-07T17:17:46,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742405_1581 (size=12001) 2024-11-07T17:17:46,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999926294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999926295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999926296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999926296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999926497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999926498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999926499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999926499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,648 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/0624822464e145c093dd368c8875594a 2024-11-07T17:17:46,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/e3ae45c727eb47429f8be17ce44f14db is 50, key is test_row_0/C:col10/1730999864064/Put/seqid=0 2024-11-07T17:17:46,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742406_1582 (size=12001) 2024-11-07T17:17:46,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999926799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999926801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999926801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:46,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:46,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999926803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,059 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/e3ae45c727eb47429f8be17ce44f14db 2024-11-07T17:17:47,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/2eb889ca96324e4593f6c1c98d326c97 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/2eb889ca96324e4593f6c1c98d326c97 2024-11-07T17:17:47,066 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/2eb889ca96324e4593f6c1c98d326c97, entries=150, sequenceid=40, filesize=30.2 K 2024-11-07T17:17:47,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/0624822464e145c093dd368c8875594a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/0624822464e145c093dd368c8875594a 2024-11-07T17:17:47,070 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/0624822464e145c093dd368c8875594a, entries=150, sequenceid=40, filesize=11.7 K 2024-11-07T17:17:47,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/e3ae45c727eb47429f8be17ce44f14db as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/e3ae45c727eb47429f8be17ce44f14db 2024-11-07T17:17:47,073 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/e3ae45c727eb47429f8be17ce44f14db, entries=150, sequenceid=40, filesize=11.7 K 2024-11-07T17:17:47,074 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 990807c0d50040fb7da6789c8418caee in 1656ms, sequenceid=40, compaction requested=false 2024-11-07T17:17:47,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:47,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:47,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-07T17:17:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-07T17:17:47,076 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-07T17:17:47,076 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0290 sec 2024-11-07T17:17:47,078 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 3.0330 sec 2024-11-07T17:17:47,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:47,303 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-07T17:17:47,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:17:47,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:47,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:17:47,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:47,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:17:47,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:47,309 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411072ed8995b615b4eb1a4b065d01243f6d1_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999866193/Put/seqid=0 2024-11-07T17:17:47,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742407_1583 (size=14594) 2024-11-07T17:17:47,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999927321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999927322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999927322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999927323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999927425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999927425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999927425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999927426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999927627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999927628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999927628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999927628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,722 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:47,725 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411072ed8995b615b4eb1a4b065d01243f6d1_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072ed8995b615b4eb1a4b065d01243f6d1_990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:47,725 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/1162fd51dc864fa498d655bc49faa5fe, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:47,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/1162fd51dc864fa498d655bc49faa5fe is 175, key is test_row_0/A:col10/1730999866193/Put/seqid=0 2024-11-07T17:17:47,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742408_1584 (size=39549) 2024-11-07T17:17:47,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999927930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999927931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999927931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:47,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:47,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999927931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:48,134 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/1162fd51dc864fa498d655bc49faa5fe 2024-11-07T17:17:48,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/5ee0f969dcc74b56b6b355c1a482b553 is 50, key is test_row_0/B:col10/1730999866193/Put/seqid=0 2024-11-07T17:17:48,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-07T17:17:48,150 INFO [Thread-2546 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-07T17:17:48,152 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:17:48,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-07T17:17:48,153 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:17:48,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-07T17:17:48,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742409_1585 (size=12001) 2024-11-07T17:17:48,154 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:17:48,154 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-07T17:17:48,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/5ee0f969dcc74b56b6b355c1a482b553 2024-11-07T17:17:48,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/1c1d4c798d7848f58243afea3220f743 is 50, key is test_row_0/C:col10/1730999866193/Put/seqid=0 2024-11-07T17:17:48,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742410_1586 (size=12001) 2024-11-07T17:17:48,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/1c1d4c798d7848f58243afea3220f743 2024-11-07T17:17:48,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/1162fd51dc864fa498d655bc49faa5fe as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/1162fd51dc864fa498d655bc49faa5fe 2024-11-07T17:17:48,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/1162fd51dc864fa498d655bc49faa5fe, entries=200, sequenceid=55, filesize=38.6 K 2024-11-07T17:17:48,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/5ee0f969dcc74b56b6b355c1a482b553 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/5ee0f969dcc74b56b6b355c1a482b553 2024-11-07T17:17:48,184 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/5ee0f969dcc74b56b6b355c1a482b553, entries=150, sequenceid=55, filesize=11.7 K 2024-11-07T17:17:48,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/1c1d4c798d7848f58243afea3220f743 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/1c1d4c798d7848f58243afea3220f743 2024-11-07T17:17:48,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/1c1d4c798d7848f58243afea3220f743, entries=150, sequenceid=55, filesize=11.7 K 2024-11-07T17:17:48,189 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 990807c0d50040fb7da6789c8418caee in 886ms, sequenceid=55, compaction requested=true 2024-11-07T17:17:48,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:48,189 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:48,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:48,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:48,189 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:48,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:17:48,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:48,190 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:48,190 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:48,190 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:48,190 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/A is initiating minor compaction (all files) 2024-11-07T17:17:48,190 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/A in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:48,190 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/05f56f82a8b449bf93b0ff89ea2a48ab, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/2eb889ca96324e4593f6c1c98d326c97, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/1162fd51dc864fa498d655bc49faa5fe] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=99.1 K 2024-11-07T17:17:48,190 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:48,190 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/05f56f82a8b449bf93b0ff89ea2a48ab, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/2eb889ca96324e4593f6c1c98d326c97, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/1162fd51dc864fa498d655bc49faa5fe] 2024-11-07T17:17:48,191 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05f56f82a8b449bf93b0ff89ea2a48ab, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1730999864052 2024-11-07T17:17:48,191 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:48,191 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/B is initiating minor compaction (all files) 2024-11-07T17:17:48,191 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/B in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:48,191 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/14354eb6130e433dae7aef827ee34558, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/0624822464e145c093dd368c8875594a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/5ee0f969dcc74b56b6b355c1a482b553] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=35.2 K 2024-11-07T17:17:48,191 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2eb889ca96324e4593f6c1c98d326c97, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1730999864064 2024-11-07T17:17:48,192 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1162fd51dc864fa498d655bc49faa5fe, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1730999866189 2024-11-07T17:17:48,192 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 14354eb6130e433dae7aef827ee34558, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1730999864052 2024-11-07T17:17:48,192 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 0624822464e145c093dd368c8875594a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1730999864064 2024-11-07T17:17:48,193 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ee0f969dcc74b56b6b355c1a482b553, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1730999866190 2024-11-07T17:17:48,199 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:48,202 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411078113f044fe8e431b80d80d06b14a7fe8_990807c0d50040fb7da6789c8418caee store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:48,204 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411078113f044fe8e431b80d80d06b14a7fe8_990807c0d50040fb7da6789c8418caee, store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:48,204 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411078113f044fe8e431b80d80d06b14a7fe8_990807c0d50040fb7da6789c8418caee because there are no MOB cells, 
store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:48,210 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#B#compaction#499 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:48,211 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/556b79d5b6b54ae8ba3ca5705d6fb008 is 50, key is test_row_0/B:col10/1730999866193/Put/seqid=0 2024-11-07T17:17:48,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742411_1587 (size=4469) 2024-11-07T17:17:48,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:48,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-07T17:17:48,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:17:48,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:48,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:17:48,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:48,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:17:48,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:48,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742412_1588 (size=12104) 2024-11-07T17:17:48,233 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411075dddb83e9efc42d8b624f48a7e2c70f1_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999868220/Put/seqid=0 2024-11-07T17:17:48,238 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/556b79d5b6b54ae8ba3ca5705d6fb008 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/556b79d5b6b54ae8ba3ca5705d6fb008 2024-11-07T17:17:48,242 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 990807c0d50040fb7da6789c8418caee/B of 990807c0d50040fb7da6789c8418caee into 556b79d5b6b54ae8ba3ca5705d6fb008(size=11.8 K), total size for store is 11.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:48,242 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:48,242 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/B, priority=13, startTime=1730999868189; duration=0sec 2024-11-07T17:17:48,242 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:48,242 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:B 2024-11-07T17:17:48,243 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:48,243 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:48,243 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/C is initiating minor compaction (all files) 2024-11-07T17:17:48,243 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/C in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:48,244 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/fb35eecf00dc4a6ca5e9dafa8cb879ae, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/e3ae45c727eb47429f8be17ce44f14db, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/1c1d4c798d7848f58243afea3220f743] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=35.2 K 2024-11-07T17:17:48,244 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting fb35eecf00dc4a6ca5e9dafa8cb879ae, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1730999864052 2024-11-07T17:17:48,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742413_1589 (size=14594) 2024-11-07T17:17:48,246 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting e3ae45c727eb47429f8be17ce44f14db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1730999864064 2024-11-07T17:17:48,246 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c1d4c798d7848f58243afea3220f743, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1730999866190 2024-11-07T17:17:48,253 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#C#compaction#501 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:48,254 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/9e119ff147674ce78f58c85b41e01830 is 50, key is test_row_0/C:col10/1730999866193/Put/seqid=0 2024-11-07T17:17:48,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-07T17:17:48,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742414_1590 (size=12104) 2024-11-07T17:17:48,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:48,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999928261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:48,264 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/9e119ff147674ce78f58c85b41e01830 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/9e119ff147674ce78f58c85b41e01830 2024-11-07T17:17:48,267 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 990807c0d50040fb7da6789c8418caee/C of 990807c0d50040fb7da6789c8418caee into 9e119ff147674ce78f58c85b41e01830(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:48,267 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:48,267 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/C, priority=13, startTime=1730999868189; duration=0sec 2024-11-07T17:17:48,267 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:48,267 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:C 2024-11-07T17:17:48,306 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:48,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-07T17:17:48,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:48,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:48,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:48,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:48,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:48,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:48,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:48,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999928364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:48,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:48,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999928433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:48,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:48,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999928433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:48,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:48,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999928433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:48,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:48,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999928436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:48,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-07T17:17:48,458 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:48,459 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-07T17:17:48,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:48,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:48,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:48,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:48,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:48,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:48,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:48,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999928567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:48,611 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:48,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-07T17:17:48,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:48,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:48,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:48,611 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:48,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:48,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:48,619 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#A#compaction#498 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:48,619 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/69db453cf01c48ea97c04df7c4df31aa is 175, key is test_row_0/A:col10/1730999866193/Put/seqid=0 2024-11-07T17:17:48,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742415_1591 (size=31058) 2024-11-07T17:17:48,646 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:48,649 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411075dddb83e9efc42d8b624f48a7e2c70f1_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075dddb83e9efc42d8b624f48a7e2c70f1_990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:48,651 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/f578d65196e640d199e3591c9d2769a3, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:48,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/f578d65196e640d199e3591c9d2769a3 is 175, key is test_row_0/A:col10/1730999868220/Put/seqid=0 2024-11-07T17:17:48,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742416_1592 (size=39549) 2024-11-07T17:17:48,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-07T17:17:48,763 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:48,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-07T17:17:48,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:48,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
as already flushing 2024-11-07T17:17:48,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:48,764 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:48,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:48,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:48,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:48,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999928869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:48,916 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:48,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-07T17:17:48,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:48,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:48,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:48,917 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:48,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:48,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:49,027 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/69db453cf01c48ea97c04df7c4df31aa as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/69db453cf01c48ea97c04df7c4df31aa 2024-11-07T17:17:49,035 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 990807c0d50040fb7da6789c8418caee/A of 990807c0d50040fb7da6789c8418caee into 69db453cf01c48ea97c04df7c4df31aa(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:49,035 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:49,035 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/A, priority=13, startTime=1730999868189; duration=0sec 2024-11-07T17:17:49,035 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:49,035 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:A 2024-11-07T17:17:49,056 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/f578d65196e640d199e3591c9d2769a3 2024-11-07T17:17:49,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/4a13c95153b044ef9a8f5e3a85297333 is 50, key is test_row_0/B:col10/1730999868220/Put/seqid=0 2024-11-07T17:17:49,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742417_1593 (size=12001) 2024-11-07T17:17:49,068 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/4a13c95153b044ef9a8f5e3a85297333 2024-11-07T17:17:49,069 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:49,069 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-07T17:17:49,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:49,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:49,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:49,070 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:49,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:49,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:49,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/878021521537459588daa803eea9fc58 is 50, key is test_row_0/C:col10/1730999868220/Put/seqid=0 2024-11-07T17:17:49,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742418_1594 (size=12001) 2024-11-07T17:17:49,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/878021521537459588daa803eea9fc58 2024-11-07T17:17:49,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/f578d65196e640d199e3591c9d2769a3 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f578d65196e640d199e3591c9d2769a3 2024-11-07T17:17:49,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f578d65196e640d199e3591c9d2769a3, entries=200, sequenceid=77, filesize=38.6 K 2024-11-07T17:17:49,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/4a13c95153b044ef9a8f5e3a85297333 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/4a13c95153b044ef9a8f5e3a85297333 2024-11-07T17:17:49,092 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/4a13c95153b044ef9a8f5e3a85297333, entries=150, sequenceid=77, filesize=11.7 K 2024-11-07T17:17:49,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/878021521537459588daa803eea9fc58 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/878021521537459588daa803eea9fc58 2024-11-07T17:17:49,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/878021521537459588daa803eea9fc58, entries=150, sequenceid=77, filesize=11.7 K 2024-11-07T17:17:49,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 990807c0d50040fb7da6789c8418caee in 875ms, sequenceid=77, compaction 
requested=false 2024-11-07T17:17:49,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:49,222 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:49,223 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-07T17:17:49,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:49,223 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T17:17:49,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:17:49,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:49,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:17:49,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:49,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:17:49,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:49,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107351d70c94b0f46c2a1395d9fdec7389f_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999868257/Put/seqid=0 2024-11-07T17:17:49,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742419_1595 (size=12154) 2024-11-07T17:17:49,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-07T17:17:49,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:49,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
as already flushing 2024-11-07T17:17:49,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:49,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999929416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:49,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:49,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999929437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:49,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:49,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999929439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:49,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:49,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999929441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:49,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:49,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999929447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:49,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:49,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999929518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:49,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:49,639 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107351d70c94b0f46c2a1395d9fdec7389f_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107351d70c94b0f46c2a1395d9fdec7389f_990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:49,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/d7a4a5a087b4431db03de4f129a7b621, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:49,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/d7a4a5a087b4431db03de4f129a7b621 is 175, key is test_row_0/A:col10/1730999868257/Put/seqid=0 2024-11-07T17:17:49,643 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742420_1596 (size=30955) 2024-11-07T17:17:49,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:49,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999929721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:50,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:50,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999930025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:50,044 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/d7a4a5a087b4431db03de4f129a7b621 2024-11-07T17:17:50,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/0809016635d44e539476c79eee312ecf is 50, key is test_row_0/B:col10/1730999868257/Put/seqid=0 2024-11-07T17:17:50,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742421_1597 (size=12001) 2024-11-07T17:17:50,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-07T17:17:50,454 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/0809016635d44e539476c79eee312ecf 2024-11-07T17:17:50,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/79cee0cf58d74028a10a62979a548f4b is 50, key is test_row_0/C:col10/1730999868257/Put/seqid=0 2024-11-07T17:17:50,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742422_1598 (size=12001) 2024-11-07T17:17:50,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:50,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999930531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:50,870 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/79cee0cf58d74028a10a62979a548f4b 2024-11-07T17:17:50,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/d7a4a5a087b4431db03de4f129a7b621 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/d7a4a5a087b4431db03de4f129a7b621 2024-11-07T17:17:50,877 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/d7a4a5a087b4431db03de4f129a7b621, entries=150, sequenceid=94, filesize=30.2 K 2024-11-07T17:17:50,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-07T17:17:50,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/0809016635d44e539476c79eee312ecf as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/0809016635d44e539476c79eee312ecf 2024-11-07T17:17:50,881 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/0809016635d44e539476c79eee312ecf, entries=150, sequenceid=94, filesize=11.7 K 2024-11-07T17:17:50,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/79cee0cf58d74028a10a62979a548f4b as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/79cee0cf58d74028a10a62979a548f4b 2024-11-07T17:17:50,884 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/79cee0cf58d74028a10a62979a548f4b, entries=150, sequenceid=94, filesize=11.7 K 2024-11-07T17:17:50,885 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 990807c0d50040fb7da6789c8418caee in 1662ms, sequenceid=94, compaction requested=true 2024-11-07T17:17:50,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:50,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:50,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-07T17:17:50,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-07T17:17:50,887 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-07T17:17:50,887 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7320 sec 2024-11-07T17:17:50,888 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 2.7360 sec 2024-11-07T17:17:51,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:51,452 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T17:17:51,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:17:51,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:51,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:17:51,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:51,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:17:51,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:51,458 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411073c5e157363b24ba383dbda9dc5642f88_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999869407/Put/seqid=0 2024-11-07T17:17:51,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742423_1599 (size=12154) 2024-11-07T17:17:51,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:51,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999931463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:51,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:51,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:51,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999931464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:51,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999931464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:51,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:51,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999931465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:51,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:51,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999931542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:51,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:51,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999931567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:51,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:51,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:51,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999931568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:51,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999931568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:51,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:51,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999931568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:51,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:51,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999931771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:51,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:51,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999931771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:51,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:51,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999931771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:51,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:51,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999931772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:51,866 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:51,869 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411073c5e157363b24ba383dbda9dc5642f88_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411073c5e157363b24ba383dbda9dc5642f88_990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:51,870 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/f5a2eb7e531b407486aa1f6db8db7571, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:51,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/f5a2eb7e531b407486aa1f6db8db7571 is 175, key is test_row_0/A:col10/1730999869407/Put/seqid=0 2024-11-07T17:17:51,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742424_1600 (size=30955) 2024-11-07T17:17:52,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:52,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999932073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:52,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:52,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999932074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:52,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:52,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999932074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:52,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:52,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999932075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:52,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-07T17:17:52,258 INFO [Thread-2546 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-07T17:17:52,259 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:17:52,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-07T17:17:52,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-07T17:17:52,261 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:17:52,261 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:17:52,261 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:17:52,277 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/f5a2eb7e531b407486aa1f6db8db7571 2024-11-07T17:17:52,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/40a7932d4521450bb9b97cddbabc489a is 50, key is test_row_0/B:col10/1730999869407/Put/seqid=0 2024-11-07T17:17:52,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742425_1601 (size=12001) 
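The repeated "Over memstore limit=512.0 K" rejections above come from the region-level blocking check: a region refuses further writes once its memstore exceeds the configured flush size multiplied by the memstore block multiplier. The sketch below only illustrates which settings govern that threshold; the 128 K flush size and multiplier of 4 are assumptions consistent with the 512 K limit in this log, not values read from the test configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: the blocking threshold behind "Over memstore limit=512.0 K".
// The concrete values are assumptions, not taken from the TestAcidGuarantees setup.
public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed 128 K flush size
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // assumed default multiplier
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // 131072 * 4 = 524288 bytes, i.e. the 512.0 K limit reported by HRegion.checkResources.
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
    }
}

Writes above this limit keep failing with RegionTooBusyException until the in-progress flush (MemStoreFlusher.0 above) brings the memstore back under the threshold.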
2024-11-07T17:17:52,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-07T17:17:52,412 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:52,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-07T17:17:52,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:52,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:52,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:52,413 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:52,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:52,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:52,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-07T17:17:52,565 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:52,565 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-07T17:17:52,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
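The flush that produced pid=167/168 originates from the admin request logged at 17:17:52,259 ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). A minimal sketch of that client call follows; connection details are assumed, and only the table name is taken from the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the admin-side flush request; on this branch the master runs it as a
// FlushTableProcedure (pid=167 above) with a per-region FlushRegionProcedure child (pid=168).
public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees")); // waits for the flush procedure
        }
    }
}

The "NOT flushing ... as already flushing" and "Unable to complete flush" entries are the region server declining the procedure while the MemStoreFlusher-triggered flush is still running; the master redispatches pid=168 (17:17:52,565 and 17:17:52,718 below).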
2024-11-07T17:17:52,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:52,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:52,566 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:52,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
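On the write path, the same back-pressure shows up as the ipc.CallRunner DEBUG lines returning RegionTooBusyException to the client connections (172.17.0.2:514xx). The HBase client normally retries such calls internally; the loop below is only an illustrative application-level backoff, with the table, family, and row names borrowed from the log, and is not the writer code used by TestAcidGuarantees.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative writer-side backoff for the RegionTooBusyException responses seen above.
// The loop itself is an assumption, not the TestAcidGuarantees writer implementation.
public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);   // may fail while the region memstore is over its blocking limit
                    return;           // write accepted
                } catch (IOException e) {
                    // e.g. a RegionTooBusyException surfaced by the client retry machinery
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;   // exponential backoff before the next attempt
                }
            }
        }
    }
}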
2024-11-07T17:17:52,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:52,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:52,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999932577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:52,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:52,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999932580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:52,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:52,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999932581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:52,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:52,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999932582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:52,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/40a7932d4521450bb9b97cddbabc489a 2024-11-07T17:17:52,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/74ae5e58ff6545619cf18d3627ebdedc is 50, key is test_row_0/C:col10/1730999869407/Put/seqid=0 2024-11-07T17:17:52,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742426_1602 (size=12001) 2024-11-07T17:17:52,697 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/74ae5e58ff6545619cf18d3627ebdedc 2024-11-07T17:17:52,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/f5a2eb7e531b407486aa1f6db8db7571 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f5a2eb7e531b407486aa1f6db8db7571 2024-11-07T17:17:52,703 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f5a2eb7e531b407486aa1f6db8db7571, entries=150, sequenceid=117, filesize=30.2 K 2024-11-07T17:17:52,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/40a7932d4521450bb9b97cddbabc489a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/40a7932d4521450bb9b97cddbabc489a 2024-11-07T17:17:52,706 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/40a7932d4521450bb9b97cddbabc489a, entries=150, sequenceid=117, filesize=11.7 K 2024-11-07T17:17:52,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/74ae5e58ff6545619cf18d3627ebdedc as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/74ae5e58ff6545619cf18d3627ebdedc 2024-11-07T17:17:52,710 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/74ae5e58ff6545619cf18d3627ebdedc, entries=150, sequenceid=117, filesize=11.7 K 2024-11-07T17:17:52,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 990807c0d50040fb7da6789c8418caee in 1259ms, sequenceid=117, compaction requested=true 2024-11-07T17:17:52,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:52,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:52,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:52,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:17:52,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:52,711 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:17:52,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:52,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:52,711 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:17:52,712 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132517 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:17:52,712 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:17:52,712 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/B is initiating minor compaction (all files) 2024-11-07T17:17:52,712 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/A is initiating minor compaction (all files) 2024-11-07T17:17:52,712 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/B in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:52,712 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/A in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:52,712 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/556b79d5b6b54ae8ba3ca5705d6fb008, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/4a13c95153b044ef9a8f5e3a85297333, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/0809016635d44e539476c79eee312ecf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/40a7932d4521450bb9b97cddbabc489a] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=47.0 K 2024-11-07T17:17:52,712 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/69db453cf01c48ea97c04df7c4df31aa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f578d65196e640d199e3591c9d2769a3, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/d7a4a5a087b4431db03de4f129a7b621, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f5a2eb7e531b407486aa1f6db8db7571] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=129.4 K 2024-11-07T17:17:52,712 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
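The "Exploring compaction algorithm has selected 4 files of size 48107 ... with 3 in ratio" entries are the size-ratio candidate check for minor compactions. The sketch below is a simplified version of that ratio test, assuming the default hbase.hstore.compaction.ratio of 1.2; it is not the actual ExploringCompactionPolicy implementation.

import java.util.List;

// Simplified ratio test: a candidate set is "in ratio" when every file is no larger than
// ratio * (combined size of the other files). Sizes below are the four B-family store
// files selected above (48107 bytes total); the 1.2 ratio is an assumed default.
public class CompactionRatioSketch {
    static boolean filesInRatio(List<Long> sizes, double ratio) {
        if (sizes.size() < 2) {
            return true;
        }
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false; // one file dominates the set, so the selection is out of ratio
            }
        }
        return true;
    }

    public static void main(String[] args) {
        List<Long> storeBFiles = List.of(12_104L, 12_001L, 12_001L, 12_001L);
        System.out.println(filesInRatio(storeBFiles, 1.2)); // true -> eligible for minor compaction
    }
}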
2024-11-07T17:17:52,713 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/69db453cf01c48ea97c04df7c4df31aa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f578d65196e640d199e3591c9d2769a3, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/d7a4a5a087b4431db03de4f129a7b621, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f5a2eb7e531b407486aa1f6db8db7571] 2024-11-07T17:17:52,713 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 556b79d5b6b54ae8ba3ca5705d6fb008, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1730999866190 2024-11-07T17:17:52,713 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69db453cf01c48ea97c04df7c4df31aa, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1730999866190 2024-11-07T17:17:52,713 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a13c95153b044ef9a8f5e3a85297333, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1730999867316 2024-11-07T17:17:52,713 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting f578d65196e640d199e3591c9d2769a3, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1730999867316 2024-11-07T17:17:52,713 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 0809016635d44e539476c79eee312ecf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1730999868254 2024-11-07T17:17:52,714 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7a4a5a087b4431db03de4f129a7b621, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1730999868254 2024-11-07T17:17:52,714 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 40a7932d4521450bb9b97cddbabc489a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730999869407 2024-11-07T17:17:52,714 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5a2eb7e531b407486aa1f6db8db7571, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730999869407 2024-11-07T17:17:52,718 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:52,718 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-07T17:17:52,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 
{event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:52,718 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T17:17:52,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:17:52,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:52,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:17:52,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:52,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:17:52,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:52,721 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:52,721 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#B#compaction#510 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:52,722 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/dbb933587efe4229ae015e64f19e892a is 50, key is test_row_0/B:col10/1730999869407/Put/seqid=0 2024-11-07T17:17:52,726 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107abe0f1b75e30400bbe02ec724608a585_990807c0d50040fb7da6789c8418caee store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:52,728 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107abe0f1b75e30400bbe02ec724608a585_990807c0d50040fb7da6789c8418caee, store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:52,729 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107abe0f1b75e30400bbe02ec724608a585_990807c0d50040fb7da6789c8418caee because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:52,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411072d1aae25cded405191eb7cae00adf2be_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999871463/Put/seqid=0 2024-11-07T17:17:52,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742427_1603 (size=12241) 2024-11-07T17:17:52,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742428_1604 (size=4469) 2024-11-07T17:17:52,746 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#A#compaction#511 average throughput is 0.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:52,746 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/008e312220bc425c9ad67a251446fea8 is 175, key is test_row_0/A:col10/1730999869407/Put/seqid=0 2024-11-07T17:17:52,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742429_1605 (size=12204) 2024-11-07T17:17:52,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742430_1606 (size=31195) 2024-11-07T17:17:52,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-07T17:17:53,146 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/dbb933587efe4229ae015e64f19e892a as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/dbb933587efe4229ae015e64f19e892a 2024-11-07T17:17:53,150 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 990807c0d50040fb7da6789c8418caee/B of 990807c0d50040fb7da6789c8418caee into dbb933587efe4229ae015e64f19e892a(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:53,150 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:53,150 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/B, priority=12, startTime=1730999872711; duration=0sec 2024-11-07T17:17:53,150 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:53,150 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:B 2024-11-07T17:17:53,150 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:17:53,151 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:17:53,151 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/C is initiating minor compaction (all files) 2024-11-07T17:17:53,151 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/C in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:53,152 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/9e119ff147674ce78f58c85b41e01830, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/878021521537459588daa803eea9fc58, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/79cee0cf58d74028a10a62979a548f4b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/74ae5e58ff6545619cf18d3627ebdedc] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=47.0 K 2024-11-07T17:17:53,152 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e119ff147674ce78f58c85b41e01830, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1730999866190 2024-11-07T17:17:53,152 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 878021521537459588daa803eea9fc58, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1730999867316 2024-11-07T17:17:53,153 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 79cee0cf58d74028a10a62979a548f4b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=94, earliestPutTs=1730999868254 2024-11-07T17:17:53,153 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 74ae5e58ff6545619cf18d3627ebdedc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730999869407 2024-11-07T17:17:53,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,157 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411072d1aae25cded405191eb7cae00adf2be_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072d1aae25cded405191eb7cae00adf2be_990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:53,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/473362c2877c4475a11792b7c8fb5739, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:53,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/473362c2877c4475a11792b7c8fb5739 is 175, key is test_row_0/A:col10/1730999871463/Put/seqid=0 2024-11-07T17:17:53,165 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/008e312220bc425c9ad67a251446fea8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/008e312220bc425c9ad67a251446fea8 2024-11-07T17:17:53,169 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 990807c0d50040fb7da6789c8418caee/A of 990807c0d50040fb7da6789c8418caee into 008e312220bc425c9ad67a251446fea8(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:53,169 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:53,169 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/A, priority=12, startTime=1730999872711; duration=0sec 2024-11-07T17:17:53,169 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:53,169 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:A 2024-11-07T17:17:53,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742431_1607 (size=31005) 2024-11-07T17:17:53,174 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=130, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/473362c2877c4475a11792b7c8fb5739 2024-11-07T17:17:53,174 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#C#compaction#513 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:53,175 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/46b7504ccc094eb8b04ad64e44c924a9 is 50, key is test_row_0/C:col10/1730999869407/Put/seqid=0 2024-11-07T17:17:53,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/9823fb5298a04d4ab57d5c721b25f3ba is 50, key is test_row_0/B:col10/1730999871463/Put/seqid=0 2024-11-07T17:17:53,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742432_1608 (size=12241) 2024-11-07T17:17:53,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742433_1609 (size=12051) 2024-11-07T17:17:53,184 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/9823fb5298a04d4ab57d5c721b25f3ba 2024-11-07T17:17:53,186 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/46b7504ccc094eb8b04ad64e44c924a9 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/46b7504ccc094eb8b04ad64e44c924a9 2024-11-07T17:17:53,190 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 990807c0d50040fb7da6789c8418caee/C of 990807c0d50040fb7da6789c8418caee into 46b7504ccc094eb8b04ad64e44c924a9(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:53,190 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:53,190 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/C, priority=12, startTime=1730999872711; duration=0sec 2024-11-07T17:17:53,190 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:53,190 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:C 2024-11-07T17:17:53,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/1ecc5435db804f5db429eeb0befd865d is 50, key is test_row_0/C:col10/1730999871463/Put/seqid=0 2024-11-07T17:17:53,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742434_1610 (size=12051) 2024-11-07T17:17:53,193 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/1ecc5435db804f5db429eeb0befd865d 2024-11-07T17:17:53,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/473362c2877c4475a11792b7c8fb5739 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/473362c2877c4475a11792b7c8fb5739 2024-11-07T17:17:53,200 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/473362c2877c4475a11792b7c8fb5739, 
entries=150, sequenceid=130, filesize=30.3 K 2024-11-07T17:17:53,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/9823fb5298a04d4ab57d5c721b25f3ba as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/9823fb5298a04d4ab57d5c721b25f3ba 2024-11-07T17:17:53,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,203 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/9823fb5298a04d4ab57d5c721b25f3ba, entries=150, sequenceid=130, filesize=11.8 K 2024-11-07T17:17:53,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/1ecc5435db804f5db429eeb0befd865d as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/1ecc5435db804f5db429eeb0befd865d 2024-11-07T17:17:53,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:17:53,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,207 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/1ecc5435db804f5db429eeb0befd865d, entries=150, sequenceid=130, filesize=11.8 K 2024-11-07T17:17:53,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,207 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for 990807c0d50040fb7da6789c8418caee in 489ms, sequenceid=130, compaction requested=false 2024-11-07T17:17:53,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:53,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:53,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-07T17:17:53,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-07T17:17:53,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,210 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-07T17:17:53,210 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 947 msec 2024-11-07T17:17:53,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,211 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 951 msec 2024-11-07T17:17:53,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [... identical DEBUG message repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 (port=37403) from 2024-11-07T17:17:53,221 through 2024-11-07T17:17:53,268 ...] 2024-11-07T17:17:53,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... duplicate records elided: the identical StoreFileTrackerFactory(122) DEBUG message "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeats continuously (roughly 300 occurrences) across RpcServer.default.FPBQ.Fifo handlers 0-2, queue=0, port=37403 between 2024-11-07T17:17:53,296 and 2024-11-07T17:17:53,344 ...]
2024-11-07T17:17:53,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
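For context on the repeated DefaultStoreFileTracker instantiation above: in recent HBase releases the store file tracker implementation is commonly selected through the hbase.store.file-tracker.impl property, settable cluster-wide or per table. The sketch below is illustrative only; the property key, the column family name "A", and the builder scaffolding are assumptions and are not taken from this log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerConfig {
  // Builds a table descriptor that pins the store file tracker implementation for a table.
  // "DEFAULT" corresponds to DefaultStoreFileTracker, the impl being instantiated in the
  // entries above; the key and family name here are assumptions for illustration.
  public static TableDescriptor descriptor() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setValue("hbase.store.file-tracker.impl", "DEFAULT")
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
        .build();
  }
}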
2024-11-07T17:17:53,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167
2024-11-07T17:17:53,363 INFO [Thread-2546 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed
2024-11-07T17:17:53,365 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-07T17:17:53,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees
2024-11-07T17:17:53,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169
2024-11-07T17:17:53,366 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-07T17:17:53,367 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-07T17:17:53,367 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
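The FLUSH operation (procId 167 completed) and the FlushTableProcedure with pid=169 above are the server-side trace of a client flush request against default:TestAcidGuarantees. A minimal illustrative sketch of such a request follows; only the table name comes from this log, while the configuration and class scaffolding are assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    // Assumed: an hbase-site.xml on the classpath pointing at the test cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Admin#flush asks the master to flush the table and waits for the resulting
      // procedure to finish, which is what the procId 167/169 entries above record.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}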
[... interleaved and subsequent duplicate StoreFileTrackerFactory(122) DEBUG entries (RpcServer handlers 0-2, port 37403) continue through 2024-11-07T17:17:53,385; duplicates omitted ...]
2024-11-07T17:17:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-07T17:17:53,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:17:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:17:53,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:17:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:17:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:17:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:17:53,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:17:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:17:53,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:17:53,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:17:53,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:17:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:17:53,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T17:17:53,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:53,519 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734
2024-11-07T17:17:53,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170
2024-11-07T17:17:53,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.
2024-11-07T17:17:53,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee:
2024-11-07T17:17:53,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.
2024-11-07T17:17:53,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170
2024-11-07T17:17:53,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=170
2024-11-07T17:17:53,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169
2024-11-07T17:17:53,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 154 msec
2024-11-07T17:17:53,524 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 158 msec
2024-11-07T17:17:53,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:53,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:53,633 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T17:17:53,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:17:53,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:53,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:17:53,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:53,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:17:53,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-07T17:17:53,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411075c2aeec32ea047079cba1d21049e7eb0_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999873633/Put/seqid=0 2024-11-07T17:17:53,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742435_1611 (size=17284) 2024-11-07T17:17:53,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:53,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999933660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:53,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:53,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999933660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999933661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:53,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999933662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:53,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999933662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-07T17:17:53,668 INFO [Thread-2546 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-07T17:17:53,669 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:17:53,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-07T17:17:53,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-07T17:17:53,671 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:17:53,671 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:17:53,672 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:17:53,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:53,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999933765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:53,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999933765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:53,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:53,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999933765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999933765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:53,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999933765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-07T17:17:53,823 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-07T17:17:53,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:53,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:53,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:53,824 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:53,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:53,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:53,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:53,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999933968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:53,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999933968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:53,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:53,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999933969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999933969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:53,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999933969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-07T17:17:53,976 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:53,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-07T17:17:53,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:53,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:53,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:53,976 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:53,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:53,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:54,056 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:54,060 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411075c2aeec32ea047079cba1d21049e7eb0_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075c2aeec32ea047079cba1d21049e7eb0_990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:54,060 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/bcebb6b0d0764c0083493bb63b37a69e, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:54,061 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/bcebb6b0d0764c0083493bb63b37a69e is 175, key is test_row_0/A:col10/1730999873633/Put/seqid=0 2024-11-07T17:17:54,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742436_1612 (size=48389) 2024-11-07T17:17:54,065 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=146, memsize=20.1 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/bcebb6b0d0764c0083493bb63b37a69e 2024-11-07T17:17:54,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/ffdca72539f4419a985d9d35f2aeb6db is 50, key is test_row_0/B:col10/1730999873633/Put/seqid=0 2024-11-07T17:17:54,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742437_1613 (size=12151) 2024-11-07T17:17:54,128 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,129 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-07T17:17:54,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:54,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:54,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:54,129 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:54,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:54,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:54,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-07T17:17:54,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:54,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999934271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:54,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999934272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:54,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999934272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:54,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999934272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:54,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999934273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,281 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,281 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-07T17:17:54,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:54,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:54,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:54,281 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:54,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:54,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:54,433 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-07T17:17:54,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:54,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:54,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:54,434 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:54,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:54,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
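[Editor's note] The repeated "Unable to complete flush ... as already flushing" / "Remote procedure failed, pid=172" cycle above is the master's flush procedure re-dispatching FlushRegionCallable to the region server while a memstore flush is still in flight; each attempt fails and is reported back, producing the identical stack traces. For orientation, here is a minimal, hypothetical sketch of how such a table flush is requested from the client side. The table name is taken from the log; everything else (cluster config, class name) is assumed, and this is not the test's own code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    // Connects to whatever cluster the hbase-site.xml on the classpath points at (assumption).
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Admin.flush() asks the cluster to flush the table's memstores; in this log that
      // request shows up as the pid=172 remote procedure dispatching FlushRegionCallable.
      // If the region is already flushing, the callable fails and the master retries it,
      // which is the repeated IOException seen above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}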
2024-11-07T17:17:54,473 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=146 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/ffdca72539f4419a985d9d35f2aeb6db 2024-11-07T17:17:54,480 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/3eecd007b2b54a1cac244ee9b57be917 is 50, key is test_row_0/C:col10/1730999873633/Put/seqid=0 2024-11-07T17:17:54,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742438_1614 (size=12151) 2024-11-07T17:17:54,586 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-07T17:17:54,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:54,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:54,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:54,587 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:54,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:54,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:54,739 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,739 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-07T17:17:54,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:54,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:54,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:54,739 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:54,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:54,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:54,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-07T17:17:54,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:54,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999934774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:54,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:54,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999934775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999934776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:54,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999934777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:54,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999934778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,884 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=146 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/3eecd007b2b54a1cac244ee9b57be917 2024-11-07T17:17:54,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/bcebb6b0d0764c0083493bb63b37a69e as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/bcebb6b0d0764c0083493bb63b37a69e 2024-11-07T17:17:54,890 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/bcebb6b0d0764c0083493bb63b37a69e, entries=250, sequenceid=146, filesize=47.3 K 2024-11-07T17:17:54,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/ffdca72539f4419a985d9d35f2aeb6db as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/ffdca72539f4419a985d9d35f2aeb6db 2024-11-07T17:17:54,891 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:54,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-07T17:17:54,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:54,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
as already flushing 2024-11-07T17:17:54,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:54,892 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:54,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:54,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:54,895 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/ffdca72539f4419a985d9d35f2aeb6db, entries=150, sequenceid=146, filesize=11.9 K 2024-11-07T17:17:54,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/3eecd007b2b54a1cac244ee9b57be917 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/3eecd007b2b54a1cac244ee9b57be917 2024-11-07T17:17:54,898 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/3eecd007b2b54a1cac244ee9b57be917, entries=150, sequenceid=146, filesize=11.9 K 2024-11-07T17:17:54,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 990807c0d50040fb7da6789c8418caee in 1266ms, sequenceid=146, compaction requested=true 2024-11-07T17:17:54,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:54,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:54,899 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:54,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:17:54,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:54,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:54,899 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:54,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-07T17:17:54,900 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110589 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:54,900 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/A is initiating minor compaction (all files) 2024-11-07T17:17:54,900 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/A in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:54,900 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/008e312220bc425c9ad67a251446fea8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/473362c2877c4475a11792b7c8fb5739, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/bcebb6b0d0764c0083493bb63b37a69e] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=108.0 K 2024-11-07T17:17:54,900 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:54,900 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/008e312220bc425c9ad67a251446fea8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/473362c2877c4475a11792b7c8fb5739, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/bcebb6b0d0764c0083493bb63b37a69e] 2024-11-07T17:17:54,901 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 008e312220bc425c9ad67a251446fea8, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730999869407 2024-11-07T17:17:54,901 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 473362c2877c4475a11792b7c8fb5739, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1730999871459 2024-11-07T17:17:54,901 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcebb6b0d0764c0083493bb63b37a69e, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1730999873610 2024-11-07T17:17:54,902 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:54,902 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:54,902 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/B is initiating minor compaction (all files) 2024-11-07T17:17:54,902 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/B in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
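[Editor's note] The "RegionTooBusyException: Over memstore limit=512.0 K" warnings earlier in this run show client writes being rejected while the region's memstore is above its blocking limit (flush size times block multiplier in HRegion.checkResources; the small 512 K limit suggests the test configures a tiny flush size). The HBase client normally retries this internally; the standalone sketch below just makes the backoff explicit. Row, family, and qualifier names are taken from the log keys; the value, retry count, and backoff policy are assumptions.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value")); // value is made up
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Region is over its memstore blocking limit; give the in-flight flush time to drain it.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}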
2024-11-07T17:17:54,903 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/dbb933587efe4229ae015e64f19e892a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/9823fb5298a04d4ab57d5c721b25f3ba, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/ffdca72539f4419a985d9d35f2aeb6db] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=35.6 K 2024-11-07T17:17:54,903 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting dbb933587efe4229ae015e64f19e892a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730999869407 2024-11-07T17:17:54,903 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 9823fb5298a04d4ab57d5c721b25f3ba, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1730999871459 2024-11-07T17:17:54,903 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting ffdca72539f4419a985d9d35f2aeb6db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1730999873610 2024-11-07T17:17:54,906 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:54,908 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107bb57dd667da54cbd9682f54123ed830d_990807c0d50040fb7da6789c8418caee store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:54,910 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107bb57dd667da54cbd9682f54123ed830d_990807c0d50040fb7da6789c8418caee, store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:54,910 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107bb57dd667da54cbd9682f54123ed830d_990807c0d50040fb7da6789c8418caee because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:54,914 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#B#compaction#520 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:54,915 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/d0bbfe6cfb134e3bb3e1fdea3434f645 is 50, key is test_row_0/B:col10/1730999873633/Put/seqid=0 2024-11-07T17:17:54,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742439_1615 (size=4469) 2024-11-07T17:17:54,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742440_1616 (size=12493) 2024-11-07T17:17:54,922 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/d0bbfe6cfb134e3bb3e1fdea3434f645 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/d0bbfe6cfb134e3bb3e1fdea3434f645 2024-11-07T17:17:54,925 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 990807c0d50040fb7da6789c8418caee/B of 990807c0d50040fb7da6789c8418caee into d0bbfe6cfb134e3bb3e1fdea3434f645(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:54,925 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:54,925 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/B, priority=13, startTime=1730999874899; duration=0sec 2024-11-07T17:17:54,925 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:54,925 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:B 2024-11-07T17:17:54,925 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:54,927 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:54,927 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/C is initiating minor compaction (all files) 2024-11-07T17:17:54,927 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/C in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
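[Editor's note] The compaction selection lines above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", with ExploringCompactionPolicy picking all three flush outputs per store) are governed by a handful of store-level settings. The sketch below names the corresponding configuration keys; the values shown are the stock defaults, not values read from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is considered
    // (the three flush outputs per store in this log just reach this threshold).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on the number of files merged in a single compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // The "16 blocking" in the selection lines: above this many store files, writes are blocked.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("blockingStoreFiles = " + conf.getInt("hbase.hstore.blockingStoreFiles", -1));
  }
}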
2024-11-07T17:17:54,927 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/46b7504ccc094eb8b04ad64e44c924a9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/1ecc5435db804f5db429eeb0befd865d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/3eecd007b2b54a1cac244ee9b57be917] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=35.6 K 2024-11-07T17:17:54,927 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 46b7504ccc094eb8b04ad64e44c924a9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730999869407 2024-11-07T17:17:54,927 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ecc5435db804f5db429eeb0befd865d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1730999871459 2024-11-07T17:17:54,928 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 3eecd007b2b54a1cac244ee9b57be917, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1730999873610 2024-11-07T17:17:54,934 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#C#compaction#521 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:54,934 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/48d52437324c40a79170026651570af9 is 50, key is test_row_0/C:col10/1730999873633/Put/seqid=0 2024-11-07T17:17:54,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742441_1617 (size=12493) 2024-11-07T17:17:54,945 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/48d52437324c40a79170026651570af9 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/48d52437324c40a79170026651570af9 2024-11-07T17:17:54,948 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 990807c0d50040fb7da6789c8418caee/C of 990807c0d50040fb7da6789c8418caee into 48d52437324c40a79170026651570af9(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:54,948 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee:
2024-11-07T17:17:54,948 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/C, priority=13, startTime=1730999874899; duration=0sec
2024-11-07T17:17:54,949 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T17:17:54,949 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:C
2024-11-07T17:17:55,044 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734
2024-11-07T17:17:55,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172
2024-11-07T17:17:55,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.
2024-11-07T17:17:55,044 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-11-07T17:17:55,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A
2024-11-07T17:17:55,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-07T17:17:55,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B
2024-11-07T17:17:55,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-07T17:17:55,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C
2024-11-07T17:17:55,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-07T17:17:55,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411075eb844a873634c5b8a42ba159f5eb9fe_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999873661/Put/seqid=0
2024-11-07T17:17:55,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742442_1618 (size=12304)
2024-11-07T17:17:55,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,057 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411075eb844a873634c5b8a42ba159f5eb9fe_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075eb844a873634c5b8a42ba159f5eb9fe_990807c0d50040fb7da6789c8418caee
2024-11-07T17:17:55,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/f269c679c82c4267a2d9e1f3a8df2d66, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee]
2024-11-07T17:17:55,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/f269c679c82c4267a2d9e1f3a8df2d66 is 175, key is test_row_0/A:col10/1730999873661/Put/seqid=0
2024-11-07T17:17:55,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742443_1619 (size=31105)
2024-11-07T17:17:55,062 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/f269c679c82c4267a2d9e1f3a8df2d66
2024-11-07T17:17:55,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/8d5f86cca8b04cdcb7d2a40a4948f4e2 is 50, key is test_row_0/B:col10/1730999873661/Put/seqid=0
2024-11-07T17:17:55,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742444_1620 (size=12151)
2024-11-07T17:17:55,317 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#A#compaction#519 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-07T17:17:55,317 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/4c90df905003431ba10bed79983df6da is 175, key is test_row_0/A:col10/1730999873633/Put/seqid=0
2024-11-07T17:17:55,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742445_1621 (size=31447)
2024-11-07T17:17:55,472 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/8d5f86cca8b04cdcb7d2a40a4948f4e2
2024-11-07T17:17:55,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/cbaf4ab6af884fda9fcd6876640f63ab is 50, key is test_row_0/C:col10/1730999873661/Put/seqid=0
2024-11-07T17:17:55,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742446_1622 (size=12151)
2024-11-07T17:17:55,482 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/cbaf4ab6af884fda9fcd6876640f63ab
2024-11-07T17:17:55,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/f269c679c82c4267a2d9e1f3a8df2d66 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f269c679c82c4267a2d9e1f3a8df2d66
2024-11-07T17:17:55,488 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f269c679c82c4267a2d9e1f3a8df2d66, entries=150, sequenceid=173, filesize=30.4 K
2024-11-07T17:17:55,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/8d5f86cca8b04cdcb7d2a40a4948f4e2 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/8d5f86cca8b04cdcb7d2a40a4948f4e2
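For context, the cells being flushed and compacted above (row test_row_0, families A/B/C, qualifier col10 in table TestAcidGuarantees) are the kind produced by an ordinary client write. The following Java sketch is illustrative only and is not taken from the test source: the table name, row key, families and qualifier are read off the log lines above, while the value payload, class name and configuration source are assumptions.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class PutSketch {
    public static void main(String[] args) throws Exception {
      // Assumed: hbase-site.xml on the classpath points at the cluster under test.
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
        Put put = new Put(Bytes.toBytes("test_row_0"));
        // One cell per column family, mirroring the A/B/C stores flushed above;
        // the value bytes here are a placeholder, not what the test actually wrote.
        for (String family : new String[] {"A", "B", "C"}) {
          put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        }
        // Lands in the region's memstores; a later flush writes it out as the HFiles seen in the log.
        table.put(put);
      }
    }
  }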
2024-11-07T17:17:55,492 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/8d5f86cca8b04cdcb7d2a40a4948f4e2, entries=150, sequenceid=173, filesize=11.9 K
2024-11-07T17:17:55,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/cbaf4ab6af884fda9fcd6876640f63ab as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/cbaf4ab6af884fda9fcd6876640f63ab
2024-11-07T17:17:55,497 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/cbaf4ab6af884fda9fcd6876640f63ab, entries=150, sequenceid=173, filesize=11.9 K
2024-11-07T17:17:55,498 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 990807c0d50040fb7da6789c8418caee in 454ms, sequenceid=173, compaction requested=false
2024-11-07T17:17:55,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee:
2024-11-07T17:17:55,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.
2024-11-07T17:17:55,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172
2024-11-07T17:17:55,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=172
2024-11-07T17:17:55,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171
2024-11-07T17:17:55,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8280 sec
2024-11-07T17:17:55,504 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 1.8340 sec
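The FlushRegionProcedure/FlushTableProcedure completions recorded above (pid=172 under ppid=171) are the server-side execution of a table flush request. A minimal client-side sketch of issuing such a request through the public Admin API follows; it is illustrative only, the table name comes from the log, and the class name and configuration source are assumptions rather than part of the test code.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class FlushSketch {
    public static void main(String[] args) throws Exception {
      // Assumed: hbase-site.xml on the classpath points at the cluster under test.
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        // Asks the master to flush every region of the table; each region writes its
        // memstores (here the A/B/C stores) out as new HFiles, after which the
        // "Finished flush" and procedure SUCCESS lines above are logged.
        admin.flush(TableName.valueOf("TestAcidGuarantees"));
      }
    }
  }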
2024-11-07T17:17:55,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
[... repeated DEBUG entries from storefiletracker.StoreFileTrackerFactory(122) — "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" — cycling across RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=37403) from 2024-11-07T17:17:55,569 through 2024-11-07T17:17:55,619 ...]
2024-11-07T17:17:55,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,726 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/4c90df905003431ba10bed79983df6da as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/4c90df905003431ba10bed79983df6da
2024-11-07T17:17:55,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,730 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 990807c0d50040fb7da6789c8418caee/A of 990807c0d50040fb7da6789c8418caee into 4c90df905003431ba10bed79983df6da(size=30.7 K), total size for store is 61.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-07T17:17:55,730 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee:
2024-11-07T17:17:55,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,731 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/A, priority=13, startTime=1730999874899; duration=0sec
2024-11-07T17:17:55,731 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T17:17:55,731 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:A
2024-11-07T17:17:55,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171
2024-11-07T17:17:55,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,775 INFO [Thread-2546 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed
2024-11-07T17:17:55,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,776 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-07T17:17:55,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees
2024-11-07T17:17:55,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173
2024-11-07T17:17:55,778 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-07T17:17:55,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,778 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-07T17:17:55,778 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-07T17:17:55,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T17:17:55,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,811 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T17:17:55,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:17:55,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:55,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:17:55,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:55,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:17:55,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:55,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:55,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,831 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411071931fe7034e748fa97449cc7532e516d_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999875810/Put/seqid=0 2024-11-07T17:17:55,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,831 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,840 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742448_1624 (size=24758) 2024-11-07T17:17:55,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-07T17:17:55,884 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:55,888 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411071931fe7034e748fa97449cc7532e516d_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411071931fe7034e748fa97449cc7532e516d_990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:55,889 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/a627c8dceb1a4193a9f06296762d1823, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:55,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/a627c8dceb1a4193a9f06296762d1823 
is 175, key is test_row_0/A:col10/1730999875810/Put/seqid=0 2024-11-07T17:17:55,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742447_1623 (size=74395) 2024-11-07T17:17:55,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:55,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999935906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:55,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:55,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999935906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:55,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:55,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999935910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:55,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:55,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999935910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:55,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:55,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999935910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:55,931 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:55,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-07T17:17:55,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:55,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:55,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:55,932 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:55,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:55,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:56,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:56,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999936011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:56,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999936011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:56,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999936015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:56,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999936015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:56,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999936016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-07T17:17:56,084 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-07T17:17:56,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:56,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:56,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:56,085 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:56,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:56,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:56,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:56,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999936212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:56,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999936213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:56,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999936217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:56,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999936218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:56,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999936218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,237 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-07T17:17:56,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:56,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:56,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:56,238 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
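The block of RegionTooBusyException entries above records the region server rejecting Mutate RPCs while a flush is still in progress, because the region's memstore has passed its blocking limit (512.0 K in this run). As a rough, hypothetical illustration only (the table name and row key are borrowed from the log; the retry count and backoff are arbitrary, and in practice the HBase client already retries this exception internally), a client-side put with an explicit retry loop could look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            int attempts = 0;
            while (true) {
                try {
                    table.put(put);   // the default client also retries internally; this loop is illustrative
                    break;
                } catch (RegionTooBusyException e) {
                    // region is over its memstore blocking limit; back off and try again
                    if (++attempts > 5) throw e;
                    Thread.sleep(100L * attempts);
                }
            }
        }
    }
}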
2024-11-07T17:17:56,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:56,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:56,298 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=185, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/a627c8dceb1a4193a9f06296762d1823 2024-11-07T17:17:56,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/185e3c4b20794a728e3f1ca3e24d35ce is 50, key is test_row_0/B:col10/1730999875810/Put/seqid=0 2024-11-07T17:17:56,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742449_1625 (size=12151) 2024-11-07T17:17:56,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-07T17:17:56,390 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,390 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-07T17:17:56,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:56,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:56,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:56,391 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:56,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:56,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:56,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:56,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999936515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:56,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999936517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:56,522 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:56,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999936520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999936520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,522 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:56,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999936521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,543 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-07T17:17:56,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:56,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:56,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:56,543 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
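The blocking limit reported in these entries is, to a first approximation, the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; a 512.0 K limit implies the test lowered the flush size far below the 128 MB default. The values below are assumptions that would reproduce such a limit, not the settings actually used by this test run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore size at which a flush is requested (default 128 MB; 128 KB assumed here).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        // Writes are rejected with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier (4 is the default multiplier).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking limit (bytes): " + blockingLimit); // 524288 bytes = 512.0 K
    }
}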
2024-11-07T17:17:56,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:56,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:56,695 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-07T17:17:56,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:56,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:56,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:56,696 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:56,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:56,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
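The pid=174 loop above is the master repeatedly re-dispatching a FlushRegionCallable that fails with "Unable to complete flush" because the region is already flushing. As a hedged sketch only, and assuming the flush procedure was started through the Admin API (which this log does not show directly), the client-side call driving such a procedure would look roughly like:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // On the server this becomes a master procedure that dispatches FlushRegionCallable
            // to the region server and is retried while the region reports it is already flushing.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}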
2024-11-07T17:17:56,710 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/185e3c4b20794a728e3f1ca3e24d35ce 2024-11-07T17:17:56,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/41f09ef074644d5ba7797234b03a19a5 is 50, key is test_row_0/C:col10/1730999875810/Put/seqid=0 2024-11-07T17:17:56,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742450_1626 (size=12151) 2024-11-07T17:17:56,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/41f09ef074644d5ba7797234b03a19a5 2024-11-07T17:17:56,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/a627c8dceb1a4193a9f06296762d1823 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/a627c8dceb1a4193a9f06296762d1823 2024-11-07T17:17:56,737 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/a627c8dceb1a4193a9f06296762d1823, entries=400, sequenceid=185, filesize=72.7 K 2024-11-07T17:17:56,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/185e3c4b20794a728e3f1ca3e24d35ce as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/185e3c4b20794a728e3f1ca3e24d35ce 2024-11-07T17:17:56,742 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/185e3c4b20794a728e3f1ca3e24d35ce, entries=150, sequenceid=185, filesize=11.9 K 2024-11-07T17:17:56,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/41f09ef074644d5ba7797234b03a19a5 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/41f09ef074644d5ba7797234b03a19a5 2024-11-07T17:17:56,750 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/41f09ef074644d5ba7797234b03a19a5, entries=150, sequenceid=185, filesize=11.9 K 2024-11-07T17:17:56,751 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 990807c0d50040fb7da6789c8418caee in 940ms, sequenceid=185, compaction requested=true 2024-11-07T17:17:56,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:56,751 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:56,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:56,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:56,751 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:56,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:17:56,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:56,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:56,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:56,756 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:56,756 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/A is initiating minor compaction (all files) 2024-11-07T17:17:56,756 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/A in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:56,757 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/4c90df905003431ba10bed79983df6da, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f269c679c82c4267a2d9e1f3a8df2d66, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/a627c8dceb1a4193a9f06296762d1823] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=133.7 K 2024-11-07T17:17:56,757 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:56,757 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/4c90df905003431ba10bed79983df6da, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f269c679c82c4267a2d9e1f3a8df2d66, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/a627c8dceb1a4193a9f06296762d1823] 2024-11-07T17:17:56,757 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:56,757 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/B is initiating minor compaction (all files) 2024-11-07T17:17:56,757 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/B in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:56,757 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/d0bbfe6cfb134e3bb3e1fdea3434f645, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/8d5f86cca8b04cdcb7d2a40a4948f4e2, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/185e3c4b20794a728e3f1ca3e24d35ce] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=35.9 K 2024-11-07T17:17:56,758 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c90df905003431ba10bed79983df6da, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1730999873610 2024-11-07T17:17:56,758 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting d0bbfe6cfb134e3bb3e1fdea3434f645, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1730999873610 2024-11-07T17:17:56,758 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting f269c679c82c4267a2d9e1f3a8df2d66, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1730999873660 2024-11-07T17:17:56,759 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d5f86cca8b04cdcb7d2a40a4948f4e2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1730999873660 2024-11-07T17:17:56,759 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting a627c8dceb1a4193a9f06296762d1823, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1730999875798 2024-11-07T17:17:56,759 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 185e3c4b20794a728e3f1ca3e24d35ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1730999875803 2024-11-07T17:17:56,767 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#B#compaction#528 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:56,768 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/e0fdc6b169114e75b975cfc48b027470 is 50, key is test_row_0/B:col10/1730999875810/Put/seqid=0 2024-11-07T17:17:56,768 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:56,777 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107efcb81ad08824ae6bfe972b8eebdd662_990807c0d50040fb7da6789c8418caee store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:56,779 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107efcb81ad08824ae6bfe972b8eebdd662_990807c0d50040fb7da6789c8418caee, store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:56,780 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107efcb81ad08824ae6bfe972b8eebdd662_990807c0d50040fb7da6789c8418caee because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:56,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742451_1627 (size=12595) 2024-11-07T17:17:56,785 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/e0fdc6b169114e75b975cfc48b027470 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/e0fdc6b169114e75b975cfc48b027470 2024-11-07T17:17:56,790 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 990807c0d50040fb7da6789c8418caee/B of 990807c0d50040fb7da6789c8418caee into e0fdc6b169114e75b975cfc48b027470(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:56,790 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:56,790 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/B, priority=13, startTime=1730999876751; duration=0sec 2024-11-07T17:17:56,790 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:56,790 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:B 2024-11-07T17:17:56,791 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:56,792 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:56,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742452_1628 (size=4469) 2024-11-07T17:17:56,792 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/C is initiating minor compaction (all files) 2024-11-07T17:17:56,792 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/C in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:56,792 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/48d52437324c40a79170026651570af9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/cbaf4ab6af884fda9fcd6876640f63ab, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/41f09ef074644d5ba7797234b03a19a5] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=35.9 K 2024-11-07T17:17:56,793 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 48d52437324c40a79170026651570af9, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1730999873610 2024-11-07T17:17:56,793 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting cbaf4ab6af884fda9fcd6876640f63ab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1730999873660 2024-11-07T17:17:56,793 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 41f09ef074644d5ba7797234b03a19a5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1730999875803 2024-11-07T17:17:56,799 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#C#compaction#530 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:56,800 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/dd685d6749de488c8a7402507ed0c549 is 50, key is test_row_0/C:col10/1730999875810/Put/seqid=0 2024-11-07T17:17:56,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742453_1629 (size=12595) 2024-11-07T17:17:56,848 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:56,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-07T17:17:56,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:56,849 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-07T17:17:56,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:17:56,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:56,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:17:56,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:56,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:17:56,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:56,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107ed77b825af7d4edfa699ada3b7f2ebe6_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999875902/Put/seqid=0 2024-11-07T17:17:56,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742454_1630 (size=12304) 2024-11-07T17:17:56,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-07T17:17:57,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:57,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:57,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999937026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999937027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999937029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999937030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999937031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999937132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999937135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999937135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,192 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#A#compaction#529 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:57,192 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/902bf823bd7e4245b6488e27dd4af7eb is 175, key is test_row_0/A:col10/1730999875810/Put/seqid=0 2024-11-07T17:17:57,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742455_1631 (size=31549) 2024-11-07T17:17:57,212 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/902bf823bd7e4245b6488e27dd4af7eb as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/902bf823bd7e4245b6488e27dd4af7eb 2024-11-07T17:17:57,217 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 990807c0d50040fb7da6789c8418caee/A of 990807c0d50040fb7da6789c8418caee into 902bf823bd7e4245b6488e27dd4af7eb(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:57,217 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:57,217 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/A, priority=13, startTime=1730999876751; duration=0sec 2024-11-07T17:17:57,217 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:57,217 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:A 2024-11-07T17:17:57,222 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/dd685d6749de488c8a7402507ed0c549 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/dd685d6749de488c8a7402507ed0c549 2024-11-07T17:17:57,226 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 990807c0d50040fb7da6789c8418caee/C of 990807c0d50040fb7da6789c8418caee into dd685d6749de488c8a7402507ed0c549(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:17:57,226 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:57,226 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/C, priority=13, startTime=1730999876752; duration=0sec 2024-11-07T17:17:57,226 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:57,226 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:C 2024-11-07T17:17:57,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:57,282 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107ed77b825af7d4edfa699ada3b7f2ebe6_990807c0d50040fb7da6789c8418caee to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ed77b825af7d4edfa699ada3b7f2ebe6_990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:57,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/d067c2eef74b4db2a7ce87abbebab290, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:57,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/d067c2eef74b4db2a7ce87abbebab290 is 175, key is test_row_0/A:col10/1730999875902/Put/seqid=0 2024-11-07T17:17:57,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742456_1632 (size=31105) 2024-11-07T17:17:57,291 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=212, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/d067c2eef74b4db2a7ce87abbebab290 2024-11-07T17:17:57,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/9c74cfc3e9954fab9179770bc701584b is 50, key is test_row_0/B:col10/1730999875902/Put/seqid=0 2024-11-07T17:17:57,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742457_1633 (size=12151) 2024-11-07T17:17:57,332 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/9c74cfc3e9954fab9179770bc701584b 2024-11-07T17:17:57,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999937334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999937337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999937338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/63ac26ef11ed4f849fc52362cbcbb6bb is 50, key is test_row_0/C:col10/1730999875902/Put/seqid=0 2024-11-07T17:17:57,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742458_1634 (size=12151) 2024-11-07T17:17:57,347 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/63ac26ef11ed4f849fc52362cbcbb6bb 2024-11-07T17:17:57,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/d067c2eef74b4db2a7ce87abbebab290 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/d067c2eef74b4db2a7ce87abbebab290 2024-11-07T17:17:57,360 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/d067c2eef74b4db2a7ce87abbebab290, entries=150, sequenceid=212, filesize=30.4 K 2024-11-07T17:17:57,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/9c74cfc3e9954fab9179770bc701584b as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/9c74cfc3e9954fab9179770bc701584b 2024-11-07T17:17:57,371 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/9c74cfc3e9954fab9179770bc701584b, entries=150, sequenceid=212, filesize=11.9 K 2024-11-07T17:17:57,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/63ac26ef11ed4f849fc52362cbcbb6bb as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/63ac26ef11ed4f849fc52362cbcbb6bb 2024-11-07T17:17:57,376 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/63ac26ef11ed4f849fc52362cbcbb6bb, entries=150, sequenceid=212, filesize=11.9 K 2024-11-07T17:17:57,377 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 990807c0d50040fb7da6789c8418caee in 528ms, sequenceid=212, compaction requested=false 2024-11-07T17:17:57,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:57,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:57,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-11-07T17:17:57,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-11-07T17:17:57,381 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-07T17:17:57,381 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6000 sec 2024-11-07T17:17:57,383 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 1.6050 sec 2024-11-07T17:17:57,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:57,637 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T17:17:57,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:17:57,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:57,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:17:57,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:57,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:17:57,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:57,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107a79705568db94a9686c77afe49092755_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999877022/Put/seqid=0 2024-11-07T17:17:57,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742459_1635 (size=14794) 2024-11-07T17:17:57,652 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:57,655 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107a79705568db94a9686c77afe49092755_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107a79705568db94a9686c77afe49092755_990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:57,655 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/c16b2ee8f39e4fef8dd3a8e4df4a5441, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:57,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/c16b2ee8f39e4fef8dd3a8e4df4a5441 is 175, key is test_row_0/A:col10/1730999877022/Put/seqid=0 2024-11-07T17:17:57,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742460_1636 (size=39749) 2024-11-07T17:17:57,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999937664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999937665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999937666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999937768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999937768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999937769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-07T17:17:57,882 INFO [Thread-2546 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-07T17:17:57,883 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:17:57,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-11-07T17:17:57,884 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:17:57,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-07T17:17:57,885 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:17:57,885 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:17:57,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999937971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999937971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:57,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999937973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:57,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-07T17:17:58,036 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:58,037 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-07T17:17:58,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999938035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:58,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:58,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:58,037 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:58,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:58,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:58,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:58,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999938040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,060 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=225, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/c16b2ee8f39e4fef8dd3a8e4df4a5441 2024-11-07T17:17:58,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/db00120bb6f5449f8be864a142d47ea6 is 50, key is test_row_0/B:col10/1730999877022/Put/seqid=0 2024-11-07T17:17:58,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742461_1637 (size=12151) 2024-11-07T17:17:58,079 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=225 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/db00120bb6f5449f8be864a142d47ea6 2024-11-07T17:17:58,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/6e90de869c334cc18e5a2516bce61e15 is 50, key is test_row_0/C:col10/1730999877022/Put/seqid=0 2024-11-07T17:17:58,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742462_1638 (size=12151) 2024-11-07T17:17:58,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-07T17:17:58,189 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-07T17:17:58,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:58,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:58,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:58,190 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
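The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting writes while the region's memstore is over its 512 K blocking limit during the flush; callers are expected to back off and retry once the flush drains the memstore. Below is a minimal client-side sketch (not part of the test itself) of such a retry; the connection setup, attempt count and sleep values are illustrative assumptions, and with default client settings the exception may instead surface wrapped in a RetriesExhaustedWithDetailsException after the client's own internal retries.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                 // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                   // rejected while the region is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);          // back off so the in-flight flush can drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}
```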
2024-11-07T17:17:58,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:58,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:58,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:58,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999938273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:58,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999938275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:58,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999938275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,341 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,342 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-07T17:17:58,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:58,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:58,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:58,342 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:58,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
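The "Over memstore limit=512.0 K" figure in these warnings is the per-region blocking size, which HBase derives from the configured memstore flush size multiplied by the block multiplier; the test presumably shrinks these so that flushes and write blocking are easy to provoke. A small sketch of the two properties involved, with illustrative values (not necessarily the test's actual settings) chosen to reproduce the 512 KB limit seen here:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit = flush size * block multiplier; values below are illustrative.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // flush at 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x = 512 KB
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore limit = " + blockingLimit + " bytes");
  }
}
```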
2024-11-07T17:17:58,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:58,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-07T17:17:58,494 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-07T17:17:58,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:58,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:58,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:58,495 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:17:58,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T17:17:58,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
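Procedure pid=176 keeps failing here because the region reports "NOT flushing ... as already flushing": FlushRegionCallable turns that into an IOException, the master records it as a RemoteProcedureException and redispatches the callable, and the loop only ends once the in-flight flush completes. On the client side, the "Checking to see if procedure is done pid=..." lines correspond to the admin flush call waiting for the table-level procedure; a minimal sketch of what the test driver is presumably doing:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a FlushTableProcedure on the master and waits for it to finish;
      // the master keeps retrying the per-region flush callable until it succeeds.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```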
2024-11-07T17:17:58,500 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=225 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/6e90de869c334cc18e5a2516bce61e15 2024-11-07T17:17:58,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/c16b2ee8f39e4fef8dd3a8e4df4a5441 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/c16b2ee8f39e4fef8dd3a8e4df4a5441 2024-11-07T17:17:58,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/c16b2ee8f39e4fef8dd3a8e4df4a5441, entries=200, sequenceid=225, filesize=38.8 K 2024-11-07T17:17:58,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/db00120bb6f5449f8be864a142d47ea6 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/db00120bb6f5449f8be864a142d47ea6 2024-11-07T17:17:58,513 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/db00120bb6f5449f8be864a142d47ea6, entries=150, sequenceid=225, filesize=11.9 K 2024-11-07T17:17:58,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/6e90de869c334cc18e5a2516bce61e15 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/6e90de869c334cc18e5a2516bce61e15 2024-11-07T17:17:58,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/6e90de869c334cc18e5a2516bce61e15, entries=150, sequenceid=225, filesize=11.9 K 2024-11-07T17:17:58,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 990807c0d50040fb7da6789c8418caee in 881ms, sequenceid=225, compaction requested=true 2024-11-07T17:17:58,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:58,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:17:58,518 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-11-07T17:17:58,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:58,518 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:58,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:17:58,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:58,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:17:58,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:58,519 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:58,519 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/B is initiating minor compaction (all files) 2024-11-07T17:17:58,519 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/B in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:58,519 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/e0fdc6b169114e75b975cfc48b027470, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/9c74cfc3e9954fab9179770bc701584b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/db00120bb6f5449f8be864a142d47ea6] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=36.0 K 2024-11-07T17:17:58,519 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:58,520 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/A is initiating minor compaction (all files) 2024-11-07T17:17:58,520 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/A in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
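Both compaction threads picked all three eligible files for a minor compaction (36,897 bytes total for family B, 102,403 bytes for family A). The exploring policy only admits a selection when no single file is disproportionately large relative to the others, governed by the compaction ratio. A simplified sketch of that size test, assuming the default ratio of 1.2 from hbase.hstore.compaction.ratio; the file sizes are the three B-family files from the log:

```java
import java.util.List;

// Simplified sketch of the "files in ratio" test used by exploring-style compaction
// selection: each file must be no larger than the combined size of the other files
// in the candidate selection times the compaction ratio.
public class CompactionRatioCheck {
  static boolean selectionWithinRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate sizes of the three B-family files selected above (12.3 K, 11.9 K, 11.9 K).
    System.out.println(selectionWithinRatio(List.of(12595L, 12151L, 12151L), 1.2)); // true
  }
}
```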
2024-11-07T17:17:58,520 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/902bf823bd7e4245b6488e27dd4af7eb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/d067c2eef74b4db2a7ce87abbebab290, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/c16b2ee8f39e4fef8dd3a8e4df4a5441] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=100.0 K 2024-11-07T17:17:58,520 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:58,520 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/902bf823bd7e4245b6488e27dd4af7eb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/d067c2eef74b4db2a7ce87abbebab290, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/c16b2ee8f39e4fef8dd3a8e4df4a5441] 2024-11-07T17:17:58,520 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting e0fdc6b169114e75b975cfc48b027470, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1730999875803 2024-11-07T17:17:58,520 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 902bf823bd7e4245b6488e27dd4af7eb, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1730999875803 2024-11-07T17:17:58,521 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting d067c2eef74b4db2a7ce87abbebab290, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1730999875902 2024-11-07T17:17:58,521 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c74cfc3e9954fab9179770bc701584b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1730999875902 2024-11-07T17:17:58,521 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting c16b2ee8f39e4fef8dd3a8e4df4a5441, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=225, earliestPutTs=1730999877022 2024-11-07T17:17:58,521 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting db00120bb6f5449f8be864a142d47ea6, keycount=150, bloomtype=ROW, size=11.9 
K, encoding=NONE, compression=NONE, seqNum=225, earliestPutTs=1730999877022 2024-11-07T17:17:58,544 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:58,546 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#B#compaction#538 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:58,546 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024110713a9174b3e3b41e2830ed79a3d0f4dec_990807c0d50040fb7da6789c8418caee store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:58,547 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/be425bad5d9e44dd845188f819569038 is 50, key is test_row_0/B:col10/1730999877022/Put/seqid=0 2024-11-07T17:17:58,548 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024110713a9174b3e3b41e2830ed79a3d0f4dec_990807c0d50040fb7da6789c8418caee, store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:58,548 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110713a9174b3e3b41e2830ed79a3d0f4dec_990807c0d50040fb7da6789c8418caee because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:58,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742463_1639 (size=12697) 2024-11-07T17:17:58,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742464_1640 (size=4469) 2024-11-07T17:17:58,647 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-07T17:17:58,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:17:58,648 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-07T17:17:58,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:17:58,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:58,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:17:58,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:58,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:17:58,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:17:58,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411072740c413870f4598bd779a3418e5eef8_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999877664/Put/seqid=0 2024-11-07T17:17:58,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742465_1641 (size=12304) 2024-11-07T17:17:58,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:58,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:17:58,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:58,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999938785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:58,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999938786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:58,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999938787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:58,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999938889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:58,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999938890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:58,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999938891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:58,967 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/be425bad5d9e44dd845188f819569038 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/be425bad5d9e44dd845188f819569038 2024-11-07T17:17:58,971 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 990807c0d50040fb7da6789c8418caee/B of 990807c0d50040fb7da6789c8418caee into be425bad5d9e44dd845188f819569038(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:58,971 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:58,971 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/B, priority=13, startTime=1730999878518; duration=0sec 2024-11-07T17:17:58,972 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:17:58,972 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:B 2024-11-07T17:17:58,972 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:17:58,973 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:17:58,973 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/C is initiating minor compaction (all files) 2024-11-07T17:17:58,973 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/C in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:17:58,973 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/dd685d6749de488c8a7402507ed0c549, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/63ac26ef11ed4f849fc52362cbcbb6bb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/6e90de869c334cc18e5a2516bce61e15] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=36.0 K 2024-11-07T17:17:58,973 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting dd685d6749de488c8a7402507ed0c549, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1730999875803 2024-11-07T17:17:58,973 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 63ac26ef11ed4f849fc52362cbcbb6bb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1730999875902 2024-11-07T17:17:58,974 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#A#compaction#537 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:58,974 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e90de869c334cc18e5a2516bce61e15, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=225, earliestPutTs=1730999877022 2024-11-07T17:17:58,974 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/8e30300db68c466ea387ee6d8fb436df is 175, key is test_row_0/A:col10/1730999877022/Put/seqid=0 2024-11-07T17:17:58,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742466_1642 (size=31651) 2024-11-07T17:17:58,979 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#C#compaction#540 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:17:58,979 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/39493b771f4e46678c1d329e97102385 is 50, key is test_row_0/C:col10/1730999877022/Put/seqid=0 2024-11-07T17:17:58,983 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/8e30300db68c466ea387ee6d8fb436df as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/8e30300db68c466ea387ee6d8fb436df 2024-11-07T17:17:58,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742467_1643 (size=12697) 2024-11-07T17:17:58,987 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 990807c0d50040fb7da6789c8418caee/A of 990807c0d50040fb7da6789c8418caee into 8e30300db68c466ea387ee6d8fb436df(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:58,987 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:58,987 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/A, priority=13, startTime=1730999878518; duration=0sec 2024-11-07T17:17:58,987 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:58,987 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:A 2024-11-07T17:17:58,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-07T17:17:59,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:17:59,065 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411072740c413870f4598bd779a3418e5eef8_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072740c413870f4598bd779a3418e5eef8_990807c0d50040fb7da6789c8418caee 2024-11-07T17:17:59,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/74a708fff9ac420cbf6d08d04450e578, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:17:59,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/74a708fff9ac420cbf6d08d04450e578 is 175, key is test_row_0/A:col10/1730999877664/Put/seqid=0 2024-11-07T17:17:59,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742468_1644 (size=31105) 2024-11-07T17:17:59,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:59,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999939093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:59,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:59,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999939093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:59,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:59,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999939093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:59,387 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/39493b771f4e46678c1d329e97102385 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/39493b771f4e46678c1d329e97102385 2024-11-07T17:17:59,391 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 990807c0d50040fb7da6789c8418caee/C of 990807c0d50040fb7da6789c8418caee into 39493b771f4e46678c1d329e97102385(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:17:59,391 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:17:59,391 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/C, priority=13, startTime=1730999878519; duration=0sec 2024-11-07T17:17:59,391 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:17:59,391 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:C 2024-11-07T17:17:59,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:59,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999939396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:59,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:59,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999939397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:59,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:59,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999939398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:59,470 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/74a708fff9ac420cbf6d08d04450e578 2024-11-07T17:17:59,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/4a4741c8b1a44653b453203b45bee6ff is 50, key is test_row_0/B:col10/1730999877664/Put/seqid=0 2024-11-07T17:17:59,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742469_1645 (size=12151) 2024-11-07T17:17:59,881 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/4a4741c8b1a44653b453203b45bee6ff 2024-11-07T17:17:59,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/66c7b00c3caa4e0e85388248e4a2951e is 50, key is test_row_0/C:col10/1730999877664/Put/seqid=0 2024-11-07T17:17:59,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742470_1646 (size=12151) 2024-11-07T17:17:59,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:59,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999939901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:59,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999939902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:59,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:17:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999939902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:17:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-07T17:18:00,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:00,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51472 deadline: 1730999940048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:00,050 DEBUG [Thread-2542 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., hostname=3a0fde618c86,37403,1730999712734, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T17:18:00,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:00,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51560 deadline: 1730999940053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:00,054 DEBUG [Thread-2544 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., hostname=3a0fde618c86,37403,1730999712734, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T17:18:00,292 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=250 (bloomFilter=true), 
to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/66c7b00c3caa4e0e85388248e4a2951e 2024-11-07T17:18:00,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/74a708fff9ac420cbf6d08d04450e578 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/74a708fff9ac420cbf6d08d04450e578 2024-11-07T17:18:00,299 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/74a708fff9ac420cbf6d08d04450e578, entries=150, sequenceid=250, filesize=30.4 K 2024-11-07T17:18:00,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/4a4741c8b1a44653b453203b45bee6ff as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/4a4741c8b1a44653b453203b45bee6ff 2024-11-07T17:18:00,303 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/4a4741c8b1a44653b453203b45bee6ff, entries=150, sequenceid=250, filesize=11.9 K 2024-11-07T17:18:00,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/66c7b00c3caa4e0e85388248e4a2951e as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/66c7b00c3caa4e0e85388248e4a2951e 2024-11-07T17:18:00,307 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/66c7b00c3caa4e0e85388248e4a2951e, entries=150, sequenceid=250, filesize=11.9 K 2024-11-07T17:18:00,308 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 990807c0d50040fb7da6789c8418caee in 1660ms, sequenceid=250, compaction requested=false 2024-11-07T17:18:00,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for 
990807c0d50040fb7da6789c8418caee: 2024-11-07T17:18:00,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:00,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-07T17:18:00,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-11-07T17:18:00,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-07T17:18:00,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4240 sec 2024-11-07T17:18:00,312 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 2.4280 sec 2024-11-07T17:18:00,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:00,909 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T17:18:00,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:18:00,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:18:00,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:18:00,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:18:00,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:18:00,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:18:00,915 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411071de629e9023e47db836d5e414752d875_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999880907/Put/seqid=0 2024-11-07T17:18:00,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742471_1647 (size=12404) 2024-11-07T17:18:00,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:00,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999940986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:00,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:00,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:00,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999940988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:00,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999940988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:01,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:01,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999941089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:01,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:01,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999941092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:01,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:01,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999941092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:01,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:01,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999941292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:01,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:01,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999941295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:01,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:01,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999941296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:01,322 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:18:01,325 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411071de629e9023e47db836d5e414752d875_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411071de629e9023e47db836d5e414752d875_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:01,325 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/9eb7af338a6c42958483af397d331344, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:18:01,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/9eb7af338a6c42958483af397d331344 is 175, key is test_row_0/A:col10/1730999880907/Put/seqid=0 2024-11-07T17:18:01,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742472_1648 (size=31205) 2024-11-07T17:18:01,329 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=265, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/9eb7af338a6c42958483af397d331344 2024-11-07T17:18:01,335 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/3cb93e7a946b47ccb8f3766b27634c31 is 50, key is test_row_0/B:col10/1730999880907/Put/seqid=0 2024-11-07T17:18:01,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742473_1649 
(size=12251) 2024-11-07T17:18:01,338 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/3cb93e7a946b47ccb8f3766b27634c31 2024-11-07T17:18:01,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/27e83efae9464112849ffef94904140f is 50, key is test_row_0/C:col10/1730999880907/Put/seqid=0 2024-11-07T17:18:01,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742474_1650 (size=12251) 2024-11-07T17:18:01,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:01,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999941595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:01,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:01,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999941597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:01,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:01,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999941598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:01,746 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/27e83efae9464112849ffef94904140f 2024-11-07T17:18:01,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/9eb7af338a6c42958483af397d331344 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/9eb7af338a6c42958483af397d331344 2024-11-07T17:18:01,753 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/9eb7af338a6c42958483af397d331344, entries=150, sequenceid=265, filesize=30.5 K 2024-11-07T17:18:01,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/3cb93e7a946b47ccb8f3766b27634c31 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/3cb93e7a946b47ccb8f3766b27634c31 2024-11-07T17:18:01,757 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/3cb93e7a946b47ccb8f3766b27634c31, entries=150, sequenceid=265, filesize=12.0 K 2024-11-07T17:18:01,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/27e83efae9464112849ffef94904140f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/27e83efae9464112849ffef94904140f 2024-11-07T17:18:01,760 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/27e83efae9464112849ffef94904140f, entries=150, sequenceid=265, filesize=12.0 K 2024-11-07T17:18:01,761 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 990807c0d50040fb7da6789c8418caee in 852ms, sequenceid=265, compaction requested=true 2024-11-07T17:18:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:18:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:18:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:18:01,761 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:18:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:18:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:18:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:18:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:18:01,761 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:18:01,762 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93961 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:18:01,762 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:18:01,762 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/A is initiating minor compaction (all files) 2024-11-07T17:18:01,762 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/B is initiating minor compaction (all files) 2024-11-07T17:18:01,762 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/A in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:18:01,762 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/B in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:01,762 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/8e30300db68c466ea387ee6d8fb436df, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/74a708fff9ac420cbf6d08d04450e578, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/9eb7af338a6c42958483af397d331344] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=91.8 K 2024-11-07T17:18:01,762 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/be425bad5d9e44dd845188f819569038, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/4a4741c8b1a44653b453203b45bee6ff, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/3cb93e7a946b47ccb8f3766b27634c31] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=36.2 K 2024-11-07T17:18:01,762 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:01,762 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/8e30300db68c466ea387ee6d8fb436df, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/74a708fff9ac420cbf6d08d04450e578, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/9eb7af338a6c42958483af397d331344] 2024-11-07T17:18:01,762 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e30300db68c466ea387ee6d8fb436df, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=225, earliestPutTs=1730999877022 2024-11-07T17:18:01,762 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting be425bad5d9e44dd845188f819569038, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=225, earliestPutTs=1730999877022 2024-11-07T17:18:01,763 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74a708fff9ac420cbf6d08d04450e578, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1730999877656 2024-11-07T17:18:01,763 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a4741c8b1a44653b453203b45bee6ff, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1730999877656 2024-11-07T17:18:01,763 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9eb7af338a6c42958483af397d331344, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1730999878782 2024-11-07T17:18:01,763 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cb93e7a946b47ccb8f3766b27634c31, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1730999878782 2024-11-07T17:18:01,769 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#B#compaction#546 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:18:01,769 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:18:01,769 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/71735ccc22a14d53aeb79e36be631bce is 50, key is test_row_0/B:col10/1730999880907/Put/seqid=0 2024-11-07T17:18:01,771 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107bfc53ca6ea1c408f8deff4278df5b6ab_990807c0d50040fb7da6789c8418caee store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:18:01,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742475_1651 (size=12899) 2024-11-07T17:18:01,773 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107bfc53ca6ea1c408f8deff4278df5b6ab_990807c0d50040fb7da6789c8418caee, store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:18:01,773 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107bfc53ca6ea1c408f8deff4278df5b6ab_990807c0d50040fb7da6789c8418caee because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:18:01,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742476_1652 (size=4469) 2024-11-07T17:18:01,777 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#A#compaction#547 average throughput is 3.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:18:01,777 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/6a7e8e23adf74e43a98e8bb7cb78e7d3 is 175, key is test_row_0/A:col10/1730999880907/Put/seqid=0 2024-11-07T17:18:01,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742477_1653 (size=31853) 2024-11-07T17:18:01,784 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/6a7e8e23adf74e43a98e8bb7cb78e7d3 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/6a7e8e23adf74e43a98e8bb7cb78e7d3 2024-11-07T17:18:01,788 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 990807c0d50040fb7da6789c8418caee/A of 990807c0d50040fb7da6789c8418caee into 6a7e8e23adf74e43a98e8bb7cb78e7d3(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:18:01,788 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:18:01,788 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/A, priority=13, startTime=1730999881761; duration=0sec 2024-11-07T17:18:01,788 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:18:01,788 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:A 2024-11-07T17:18:01,788 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T17:18:01,789 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T17:18:01,789 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/C is initiating minor compaction (all files) 2024-11-07T17:18:01,789 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/C in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:18:01,789 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/39493b771f4e46678c1d329e97102385, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/66c7b00c3caa4e0e85388248e4a2951e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/27e83efae9464112849ffef94904140f] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=36.2 K 2024-11-07T17:18:01,790 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39493b771f4e46678c1d329e97102385, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=225, earliestPutTs=1730999877022 2024-11-07T17:18:01,790 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66c7b00c3caa4e0e85388248e4a2951e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1730999877656 2024-11-07T17:18:01,790 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27e83efae9464112849ffef94904140f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1730999878782 2024-11-07T17:18:01,795 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#C#compaction#548 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:18:01,796 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/a064f552ad45408a8002be8be35314a9 is 50, key is test_row_0/C:col10/1730999880907/Put/seqid=0 2024-11-07T17:18:01,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742478_1654 (size=12899) 2024-11-07T17:18:01,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-07T17:18:01,989 INFO [Thread-2546 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-07T17:18:01,990 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T17:18:01,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-11-07T17:18:01,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-07T17:18:01,991 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T17:18:01,992 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T17:18:01,992 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T17:18:02,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-07T17:18:02,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:02,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-07T17:18:02,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:18:02,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:18:02,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:18:02,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:18:02,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:18:02,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:18:02,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411079a12969a78dd46108647af2d1d7e72e3_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999882102/Put/seqid=0 2024-11-07T17:18:02,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:02,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999942134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:02,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999942135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:02,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999942136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742479_1655 (size=12454) 2024-11-07T17:18:02,139 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:18:02,141 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411079a12969a78dd46108647af2d1d7e72e3_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411079a12969a78dd46108647af2d1d7e72e3_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:02,142 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/4a803b017c454ab2a86114c79134ba79, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:18:02,143 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/4a803b017c454ab2a86114c79134ba79 is 175, key is test_row_0/A:col10/1730999882102/Put/seqid=0 2024-11-07T17:18:02,143 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-07T17:18:02,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:02,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
as already flushing 2024-11-07T17:18:02,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:02,144 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742480_1656 (size=31255) 2024-11-07T17:18:02,148 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/4a803b017c454ab2a86114c79134ba79 2024-11-07T17:18:02,154 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/833e023a41d74e9a97774d52e4ccaf82 is 50, key is test_row_0/B:col10/1730999882102/Put/seqid=0 2024-11-07T17:18:02,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742481_1657 (size=12301) 2024-11-07T17:18:02,176 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/71735ccc22a14d53aeb79e36be631bce as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/71735ccc22a14d53aeb79e36be631bce 2024-11-07T17:18:02,180 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 990807c0d50040fb7da6789c8418caee/B of 990807c0d50040fb7da6789c8418caee into 
71735ccc22a14d53aeb79e36be631bce(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:18:02,180 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:18:02,180 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/B, priority=13, startTime=1730999881761; duration=0sec 2024-11-07T17:18:02,180 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:18:02,180 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:B 2024-11-07T17:18:02,213 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/a064f552ad45408a8002be8be35314a9 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/a064f552ad45408a8002be8be35314a9 2024-11-07T17:18:02,216 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 990807c0d50040fb7da6789c8418caee/C of 990807c0d50040fb7da6789c8418caee into a064f552ad45408a8002be8be35314a9(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:18:02,216 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:18:02,216 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/C, priority=13, startTime=1730999881761; duration=0sec 2024-11-07T17:18:02,216 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:18:02,216 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:C 2024-11-07T17:18:02,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:02,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999942237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:02,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999942238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:02,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999942239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-07T17:18:02,296 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,296 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-07T17:18:02,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:02,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:18:02,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:02,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:02,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999942439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:02,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999942440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:02,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999942442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,450 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-07T17:18:02,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:02,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:18:02,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:02,451 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,558 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/833e023a41d74e9a97774d52e4ccaf82 2024-11-07T17:18:02,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/9ec54b0d04e54c3b91ea5192ed5057d7 is 50, key is test_row_0/C:col10/1730999882102/Put/seqid=0 2024-11-07T17:18:02,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742482_1658 (size=12301) 2024-11-07T17:18:02,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-07T17:18:02,603 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,603 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-07T17:18:02,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:02,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:18:02,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:02,603 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:02,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999942744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:02,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999942744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:02,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999942744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,755 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-07T17:18:02,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:02,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:18:02,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:02,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,908 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:18:02,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-07T17:18:02,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:02,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:18:02,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:02,909 ERROR [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T17:18:02,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
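The entries above alternate between two effects of memstore pressure on region 990807c0d50040fb7da6789c8418caee: the master's FlushRegionProcedure (pid=178) keeps failing with "Unable to complete flush" because the region is already flushing, and incoming Mutate calls are rejected with RegionTooBusyException once the 512 K blocking limit is exceeded. A minimal, illustrative sketch of a writer backing off on that exception is below; the row, family, and qualifier are taken from the log, while the value and the retry policy are assumptions, and the HBase client normally retries such failures internally on its own.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffOnBusyRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // "value" is a placeholder; the test's actual payloads are not shown in the log.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted once the memstore drains below the blocking limit
        } catch (RegionTooBusyException e) {
          // The region's memstore is over its blocking limit (512 K in this log);
          // back off and let the in-progress flush make room.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```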
2024-11-07T17:18:02,968 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/9ec54b0d04e54c3b91ea5192ed5057d7 2024-11-07T17:18:02,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/4a803b017c454ab2a86114c79134ba79 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/4a803b017c454ab2a86114c79134ba79 2024-11-07T17:18:02,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/4a803b017c454ab2a86114c79134ba79, entries=150, sequenceid=293, filesize=30.5 K 2024-11-07T17:18:02,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/833e023a41d74e9a97774d52e4ccaf82 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/833e023a41d74e9a97774d52e4ccaf82 2024-11-07T17:18:02,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/833e023a41d74e9a97774d52e4ccaf82, entries=150, sequenceid=293, filesize=12.0 K 2024-11-07T17:18:02,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/9ec54b0d04e54c3b91ea5192ed5057d7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/9ec54b0d04e54c3b91ea5192ed5057d7 2024-11-07T17:18:02,982 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/9ec54b0d04e54c3b91ea5192ed5057d7, entries=150, sequenceid=293, filesize=12.0 K 2024-11-07T17:18:02,983 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 990807c0d50040fb7da6789c8418caee in 855ms, sequenceid=293, compaction requested=false 2024-11-07T17:18:02,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:18:03,060 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:18:03,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37403 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=178 2024-11-07T17:18:03,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:03,061 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-07T17:18:03,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:18:03,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:18:03,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:18:03,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:18:03,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:18:03,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:18:03,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107fd1ae7baee2a42ca9996f95dd597811c_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999882134/Put/seqid=0 2024-11-07T17:18:03,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742483_1659 (size=12454) 2024-11-07T17:18:03,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-07T17:18:03,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:03,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. as already flushing 2024-11-07T17:18:03,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:03,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999943270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:03,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:03,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999943271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:03,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:03,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999943272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:03,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:03,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999943373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:03,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:03,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999943374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:03,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:03,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999943375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:03,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:18:03,474 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107fd1ae7baee2a42ca9996f95dd597811c_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fd1ae7baee2a42ca9996f95dd597811c_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:03,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/b4ecf60c17954074b74648d62321c643, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:18:03,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/b4ecf60c17954074b74648d62321c643 is 175, key is test_row_0/A:col10/1730999882134/Put/seqid=0 2024-11-07T17:18:03,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742484_1660 (size=31255) 2024-11-07T17:18:03,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:03,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999943576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:03,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:03,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999943576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:03,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:03,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999943577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:03,879 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=304, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/b4ecf60c17954074b74648d62321c643 2024-11-07T17:18:03,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:03,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51496 deadline: 1730999943879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:03,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:03,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999943881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:03,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:03,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51546 deadline: 1730999943881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:03,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/e82c4924579a45a884f9b4ca41f4cb8c is 50, key is test_row_0/B:col10/1730999882134/Put/seqid=0 2024-11-07T17:18:03,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742485_1661 (size=12301) 2024-11-07T17:18:03,889 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/e82c4924579a45a884f9b4ca41f4cb8c 2024-11-07T17:18:03,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/2e465f233bf947eb94f732929585830f is 50, key is test_row_0/C:col10/1730999882134/Put/seqid=0 2024-11-07T17:18:03,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742486_1662 (size=12301) 2024-11-07T17:18:03,899 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/2e465f233bf947eb94f732929585830f 2024-11-07T17:18:03,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/b4ecf60c17954074b74648d62321c643 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/b4ecf60c17954074b74648d62321c643 
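The flush above writes family A's cells both as a MOB file under mobdir/ (handled by DefaultMobStoreFlusher and HMobStore's rename) and as a regular store file that is then committed from .tmp/ into the store. A minimal, illustrative sketch of how a column family ends up using MOB storage like this follows; the table name and threshold are assumptions for the example, not taken from this test's setup.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // With MOB enabled on family "A", cells larger than the threshold are flushed to
      // MOB files under the mobdir/ area, which is why the flusher above renames a MOB
      // file before committing the ordinary store file that references it.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("MobExampleTable")) // hypothetical table name
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(4L) // bytes; a deliberately tiny threshold (assumption)
              .build())
          .build());
    }
  }
}
```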
2024-11-07T17:18:03,905 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/b4ecf60c17954074b74648d62321c643, entries=150, sequenceid=304, filesize=30.5 K 2024-11-07T17:18:03,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/e82c4924579a45a884f9b4ca41f4cb8c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/e82c4924579a45a884f9b4ca41f4cb8c 2024-11-07T17:18:03,908 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/e82c4924579a45a884f9b4ca41f4cb8c, entries=150, sequenceid=304, filesize=12.0 K 2024-11-07T17:18:03,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/2e465f233bf947eb94f732929585830f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/2e465f233bf947eb94f732929585830f 2024-11-07T17:18:03,912 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/2e465f233bf947eb94f732929585830f, entries=150, sequenceid=304, filesize=12.0 K 2024-11-07T17:18:03,912 INFO [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 990807c0d50040fb7da6789c8418caee in 851ms, sequenceid=304, compaction requested=true 2024-11-07T17:18:03,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:18:03,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
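With the region flush for pid=178 finished above, the parent FlushTableProcedure (pid=177) completes and the client's FLUSH operation on default:TestAcidGuarantees is reported done (see the "procId: 177 completed" entry below). A table flush like this can be requested through the Admin API; a minimal sketch under the same assumptions as the earlier examples:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; in this log that request shows
      // up as a FlushTableProcedure with a per-region FlushRegionProcedure child.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```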
2024-11-07T17:18:03,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3a0fde618c86:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-07T17:18:03,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-11-07T17:18:03,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-07T17:18:03,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9220 sec 2024-11-07T17:18:03,916 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 1.9250 sec 2024-11-07T17:18:04,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(8581): Flush requested on 990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:04,060 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-07T17:18:04,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:18:04,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:18:04,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:18:04,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:18:04,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:18:04,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:18:04,066 DEBUG [Thread-2544 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f6a59e4 to 127.0.0.1:64938 2024-11-07T17:18:04,066 DEBUG [Thread-2544 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:04,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411077bff5f59e3ca4128bfbf4229d60810ba_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999883264/Put/seqid=0 2024-11-07T17:18:04,066 DEBUG [Thread-2549 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bb6288a to 127.0.0.1:64938 2024-11-07T17:18:04,066 DEBUG [Thread-2549 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:04,068 DEBUG [Thread-2551 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06556601 to 127.0.0.1:64938 2024-11-07T17:18:04,068 DEBUG [Thread-2551 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:04,068 DEBUG [Thread-2547 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53305d9b to 127.0.0.1:64938 2024-11-07T17:18:04,068 DEBUG [Thread-2547 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:04,069 DEBUG [Thread-2555 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x410bf0c8 to 127.0.0.1:64938 2024-11-07T17:18:04,069 DEBUG [Thread-2555 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:04,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742487_1663 (size=12454) 2024-11-07T17:18:04,070 DEBUG [Thread-2553 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x458a85fd to 127.0.0.1:64938 2024-11-07T17:18:04,070 DEBUG [Thread-2542 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b914bf4 to 127.0.0.1:64938 2024-11-07T17:18:04,070 DEBUG [Thread-2553 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:04,070 DEBUG [Thread-2542 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:04,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-07T17:18:04,094 INFO [Thread-2546 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-11-07T17:18:04,384 DEBUG [Thread-2538 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17327621 to 127.0.0.1:64938 2024-11-07T17:18:04,384 DEBUG [Thread-2538 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:04,386 DEBUG [Thread-2536 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7fc332d8 to 127.0.0.1:64938 2024-11-07T17:18:04,386 DEBUG [Thread-2536 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:04,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:04,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999944387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:04,470 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:18:04,473 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411077bff5f59e3ca4128bfbf4229d60810ba_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411077bff5f59e3ca4128bfbf4229d60810ba_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:04,473 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/377cc5a8191e43deb6c6dbabaed1490b, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:18:04,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/377cc5a8191e43deb6c6dbabaed1490b is 175, key is test_row_0/A:col10/1730999883264/Put/seqid=0 2024-11-07T17:18:04,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742488_1664 (size=31255) 2024-11-07T17:18:04,877 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=332, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/377cc5a8191e43deb6c6dbabaed1490b 2024-11-07T17:18:04,882 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/3737213cbed446bea3944c1b97976cde is 50, key is test_row_0/B:col10/1730999883264/Put/seqid=0 2024-11-07T17:18:04,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742489_1665 
(size=12301) 2024-11-07T17:18:05,285 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/3737213cbed446bea3944c1b97976cde 2024-11-07T17:18:05,290 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/c5f05dd696114e2cb34e4dbd3b09e2b8 is 50, key is test_row_0/C:col10/1730999883264/Put/seqid=0 2024-11-07T17:18:05,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742490_1666 (size=12301) 2024-11-07T17:18:05,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T17:18:05,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37403 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51484 deadline: 1730999945391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:05,693 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/c5f05dd696114e2cb34e4dbd3b09e2b8 2024-11-07T17:18:05,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/377cc5a8191e43deb6c6dbabaed1490b as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/377cc5a8191e43deb6c6dbabaed1490b 2024-11-07T17:18:05,699 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/377cc5a8191e43deb6c6dbabaed1490b, entries=150, sequenceid=332, filesize=30.5 K 2024-11-07T17:18:05,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/3737213cbed446bea3944c1b97976cde as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/3737213cbed446bea3944c1b97976cde 2024-11-07T17:18:05,702 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/3737213cbed446bea3944c1b97976cde, entries=150, sequenceid=332, filesize=12.0 K 2024-11-07T17:18:05,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/c5f05dd696114e2cb34e4dbd3b09e2b8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/c5f05dd696114e2cb34e4dbd3b09e2b8 2024-11-07T17:18:05,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/c5f05dd696114e2cb34e4dbd3b09e2b8, entries=150, sequenceid=332, filesize=12.0 K 2024-11-07T17:18:05,705 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 990807c0d50040fb7da6789c8418caee in 1645ms, sequenceid=332, compaction requested=true 2024-11-07T17:18:05,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:18:05,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T17:18:05,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:18:05,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T17:18:05,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:18:05,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 990807c0d50040fb7da6789c8418caee:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T17:18:05,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:18:05,705 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:18:05,705 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:18:05,706 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125618 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:18:05,706 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49802 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:18:05,706 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/A is initiating minor compaction (all files) 2024-11-07T17:18:05,706 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/B is initiating minor compaction (all files) 2024-11-07T17:18:05,706 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/A in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:18:05,706 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/B in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:05,706 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/71735ccc22a14d53aeb79e36be631bce, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/833e023a41d74e9a97774d52e4ccaf82, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/e82c4924579a45a884f9b4ca41f4cb8c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/3737213cbed446bea3944c1b97976cde] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=48.6 K 2024-11-07T17:18:05,706 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/6a7e8e23adf74e43a98e8bb7cb78e7d3, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/4a803b017c454ab2a86114c79134ba79, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/b4ecf60c17954074b74648d62321c643, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/377cc5a8191e43deb6c6dbabaed1490b] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=122.7 K 2024-11-07T17:18:05,706 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:05,706 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
files: [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/6a7e8e23adf74e43a98e8bb7cb78e7d3, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/4a803b017c454ab2a86114c79134ba79, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/b4ecf60c17954074b74648d62321c643, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/377cc5a8191e43deb6c6dbabaed1490b] 2024-11-07T17:18:05,707 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 71735ccc22a14d53aeb79e36be631bce, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1730999878782 2024-11-07T17:18:05,707 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a7e8e23adf74e43a98e8bb7cb78e7d3, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1730999878782 2024-11-07T17:18:05,707 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 833e023a41d74e9a97774d52e4ccaf82, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1730999880985 2024-11-07T17:18:05,707 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a803b017c454ab2a86114c79134ba79, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1730999880985 2024-11-07T17:18:05,707 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting e82c4924579a45a884f9b4ca41f4cb8c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1730999882131 2024-11-07T17:18:05,707 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4ecf60c17954074b74648d62321c643, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1730999882131 2024-11-07T17:18:05,707 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] compactions.Compactor(224): Compacting 377cc5a8191e43deb6c6dbabaed1490b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1730999883264 2024-11-07T17:18:05,707 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 3737213cbed446bea3944c1b97976cde, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1730999883264 2024-11-07T17:18:05,712 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:18:05,713 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#B#compaction#558 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:18:05,713 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107657318f1a11f4e5a9547ba0abe42ca19_990807c0d50040fb7da6789c8418caee store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:18:05,714 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/d7ca05acdde741ea80376b634c7ce415 is 50, key is test_row_0/B:col10/1730999883264/Put/seqid=0 2024-11-07T17:18:05,715 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107657318f1a11f4e5a9547ba0abe42ca19_990807c0d50040fb7da6789c8418caee, store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:18:05,716 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107657318f1a11f4e5a9547ba0abe42ca19_990807c0d50040fb7da6789c8418caee because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:18:05,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742491_1667 (size=13085) 2024-11-07T17:18:05,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742492_1668 (size=4469) 2024-11-07T17:18:06,119 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#A#compaction#559 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:18:06,120 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/9889c234b9804811b1dd9d70b9c8433c is 175, key is test_row_0/A:col10/1730999883264/Put/seqid=0 2024-11-07T17:18:06,121 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/d7ca05acdde741ea80376b634c7ce415 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/d7ca05acdde741ea80376b634c7ce415 2024-11-07T17:18:06,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742493_1669 (size=32039) 2024-11-07T17:18:06,125 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 990807c0d50040fb7da6789c8418caee/B of 990807c0d50040fb7da6789c8418caee into d7ca05acdde741ea80376b634c7ce415(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:18:06,125 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:18:06,125 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/B, priority=12, startTime=1730999885705; duration=0sec 2024-11-07T17:18:06,125 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T17:18:06,125 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:B 2024-11-07T17:18:06,125 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T17:18:06,126 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49802 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T17:18:06,126 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1540): 990807c0d50040fb7da6789c8418caee/C is initiating minor compaction (all files) 2024-11-07T17:18:06,126 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 990807c0d50040fb7da6789c8418caee/C in TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:18:06,126 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/a064f552ad45408a8002be8be35314a9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/9ec54b0d04e54c3b91ea5192ed5057d7, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/2e465f233bf947eb94f732929585830f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/c5f05dd696114e2cb34e4dbd3b09e2b8] into tmpdir=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp, totalSize=48.6 K 2024-11-07T17:18:06,126 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting a064f552ad45408a8002be8be35314a9, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1730999878782 2024-11-07T17:18:06,126 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ec54b0d04e54c3b91ea5192ed5057d7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1730999880985 2024-11-07T17:18:06,127 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e465f233bf947eb94f732929585830f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1730999882131 2024-11-07T17:18:06,127 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] compactions.Compactor(224): Compacting c5f05dd696114e2cb34e4dbd3b09e2b8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1730999883264 2024-11-07T17:18:06,132 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 990807c0d50040fb7da6789c8418caee#C#compaction#560 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T17:18:06,132 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/e8303480234a44d5ad31918a63e45ca1 is 50, key is test_row_0/C:col10/1730999883264/Put/seqid=0 2024-11-07T17:18:06,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742494_1670 (size=13085) 2024-11-07T17:18:06,526 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/9889c234b9804811b1dd9d70b9c8433c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/9889c234b9804811b1dd9d70b9c8433c 2024-11-07T17:18:06,529 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 990807c0d50040fb7da6789c8418caee/A of 990807c0d50040fb7da6789c8418caee into 9889c234b9804811b1dd9d70b9c8433c(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T17:18:06,529 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:18:06,529 INFO [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/A, priority=12, startTime=1730999885705; duration=0sec 2024-11-07T17:18:06,529 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:18:06,529 DEBUG [RS:0;3a0fde618c86:37403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:A 2024-11-07T17:18:06,537 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/e8303480234a44d5ad31918a63e45ca1 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/e8303480234a44d5ad31918a63e45ca1 2024-11-07T17:18:06,540 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 990807c0d50040fb7da6789c8418caee/C of 990807c0d50040fb7da6789c8418caee into e8303480234a44d5ad31918a63e45ca1(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T17:18:06,540 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:18:06,540 INFO [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee., storeName=990807c0d50040fb7da6789c8418caee/C, priority=12, startTime=1730999885705; duration=0sec 2024-11-07T17:18:06,540 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T17:18:06,540 DEBUG [RS:0;3a0fde618c86:37403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 990807c0d50040fb7da6789c8418caee:C 2024-11-07T17:18:07,405 DEBUG [Thread-2540 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1584f18a to 127.0.0.1:64938 2024-11-07T17:18:07,406 DEBUG [Thread-2540 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:07,406 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-07T17:18:07,406 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55 2024-11-07T17:18:07,406 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-11-07T17:18:07,406 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-11-07T17:18:07,406 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 33 2024-11-07T17:18:07,406 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48 2024-11-07T17:18:07,406 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-07T17:18:07,406 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7036 2024-11-07T17:18:07,406 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6810 2024-11-07T17:18:07,406 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6755 2024-11-07T17:18:07,406 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7076 2024-11-07T17:18:07,406 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6832 2024-11-07T17:18:07,406 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-07T17:18:07,406 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-07T17:18:07,406 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d5efb7a to 127.0.0.1:64938 2024-11-07T17:18:07,406 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:07,407 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-07T17:18:07,407 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-07T17:18:07,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-07T17:18:07,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-07T17:18:07,409 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999887409"}]},"ts":"1730999887409"} 2024-11-07T17:18:07,410 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-07T17:18:07,411 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-07T17:18:07,412 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T17:18:07,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=990807c0d50040fb7da6789c8418caee, UNASSIGN}] 2024-11-07T17:18:07,413 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=990807c0d50040fb7da6789c8418caee, UNASSIGN 2024-11-07T17:18:07,414 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=990807c0d50040fb7da6789c8418caee, regionState=CLOSING, regionLocation=3a0fde618c86,37403,1730999712734 2024-11-07T17:18:07,414 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T17:18:07,414 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; CloseRegionProcedure 990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734}] 2024-11-07T17:18:07,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-07T17:18:07,565 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3a0fde618c86,37403,1730999712734 2024-11-07T17:18:07,566 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(124): Close 990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:07,566 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T17:18:07,566 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1681): Closing 990807c0d50040fb7da6789c8418caee, disabling compactions & flushes 2024-11-07T17:18:07,566 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:07,566 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:07,566 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
after waiting 0 ms 2024-11-07T17:18:07,566 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 2024-11-07T17:18:07,566 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(2837): Flushing 990807c0d50040fb7da6789c8418caee 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-07T17:18:07,566 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=A 2024-11-07T17:18:07,566 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:18:07,566 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=B 2024-11-07T17:18:07,566 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:18:07,566 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 990807c0d50040fb7da6789c8418caee, store=C 2024-11-07T17:18:07,566 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T17:18:07,570 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107b5578015c49f4d83b0eb2b956ff0b0cb_990807c0d50040fb7da6789c8418caee is 50, key is test_row_0/A:col10/1730999887404/Put/seqid=0 2024-11-07T17:18:07,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742495_1671 (size=12454) 2024-11-07T17:18:07,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-07T17:18:07,974 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T17:18:07,976 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107b5578015c49f4d83b0eb2b956ff0b0cb_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107b5578015c49f4d83b0eb2b956ff0b0cb_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:07,977 DEBUG 
[RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/920f18b32d2340169ff9bfa1dd86c9d8, store: [table=TestAcidGuarantees family=A region=990807c0d50040fb7da6789c8418caee] 2024-11-07T17:18:07,977 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/920f18b32d2340169ff9bfa1dd86c9d8 is 175, key is test_row_0/A:col10/1730999887404/Put/seqid=0 2024-11-07T17:18:07,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742496_1672 (size=31255) 2024-11-07T17:18:08,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-07T17:18:08,381 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=344, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/920f18b32d2340169ff9bfa1dd86c9d8 2024-11-07T17:18:08,385 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/54ae5996e8124795823566d58eeea8f7 is 50, key is test_row_0/B:col10/1730999887404/Put/seqid=0 2024-11-07T17:18:08,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742497_1673 (size=12301) 2024-11-07T17:18:08,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-07T17:18:08,789 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=344 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/54ae5996e8124795823566d58eeea8f7 2024-11-07T17:18:08,794 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/2b583c6694704d44a8aafea26fded630 is 50, key is test_row_0/C:col10/1730999887404/Put/seqid=0 2024-11-07T17:18:08,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742498_1674 (size=12301) 2024-11-07T17:18:09,197 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=13.42 KB at sequenceid=344 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/2b583c6694704d44a8aafea26fded630 2024-11-07T17:18:09,200 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/A/920f18b32d2340169ff9bfa1dd86c9d8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/920f18b32d2340169ff9bfa1dd86c9d8 2024-11-07T17:18:09,203 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/920f18b32d2340169ff9bfa1dd86c9d8, entries=150, sequenceid=344, filesize=30.5 K 2024-11-07T17:18:09,203 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/B/54ae5996e8124795823566d58eeea8f7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/54ae5996e8124795823566d58eeea8f7 2024-11-07T17:18:09,206 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/54ae5996e8124795823566d58eeea8f7, entries=150, sequenceid=344, filesize=12.0 K 2024-11-07T17:18:09,206 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/.tmp/C/2b583c6694704d44a8aafea26fded630 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/2b583c6694704d44a8aafea26fded630 2024-11-07T17:18:09,208 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/2b583c6694704d44a8aafea26fded630, entries=150, sequenceid=344, filesize=12.0 K 2024-11-07T17:18:09,209 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for 990807c0d50040fb7da6789c8418caee in 1643ms, sequenceid=344, compaction requested=false 2024-11-07T17:18:09,209 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/05f56f82a8b449bf93b0ff89ea2a48ab, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/2eb889ca96324e4593f6c1c98d326c97, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/1162fd51dc864fa498d655bc49faa5fe, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/69db453cf01c48ea97c04df7c4df31aa, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f578d65196e640d199e3591c9d2769a3, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/d7a4a5a087b4431db03de4f129a7b621, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/008e312220bc425c9ad67a251446fea8, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f5a2eb7e531b407486aa1f6db8db7571, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/473362c2877c4475a11792b7c8fb5739, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/bcebb6b0d0764c0083493bb63b37a69e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/4c90df905003431ba10bed79983df6da, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f269c679c82c4267a2d9e1f3a8df2d66, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/a627c8dceb1a4193a9f06296762d1823, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/902bf823bd7e4245b6488e27dd4af7eb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/d067c2eef74b4db2a7ce87abbebab290, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/c16b2ee8f39e4fef8dd3a8e4df4a5441, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/8e30300db68c466ea387ee6d8fb436df, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/74a708fff9ac420cbf6d08d04450e578, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/6a7e8e23adf74e43a98e8bb7cb78e7d3, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/9eb7af338a6c42958483af397d331344, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/4a803b017c454ab2a86114c79134ba79, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/b4ecf60c17954074b74648d62321c643, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/377cc5a8191e43deb6c6dbabaed1490b] to archive 2024-11-07T17:18:09,210 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T17:18:09,211 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/05f56f82a8b449bf93b0ff89ea2a48ab to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/05f56f82a8b449bf93b0ff89ea2a48ab 2024-11-07T17:18:09,212 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/2eb889ca96324e4593f6c1c98d326c97 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/2eb889ca96324e4593f6c1c98d326c97 2024-11-07T17:18:09,213 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/1162fd51dc864fa498d655bc49faa5fe to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/1162fd51dc864fa498d655bc49faa5fe 2024-11-07T17:18:09,214 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/69db453cf01c48ea97c04df7c4df31aa to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/69db453cf01c48ea97c04df7c4df31aa 2024-11-07T17:18:09,214 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f578d65196e640d199e3591c9d2769a3 to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f578d65196e640d199e3591c9d2769a3 2024-11-07T17:18:09,215 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/d7a4a5a087b4431db03de4f129a7b621 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/d7a4a5a087b4431db03de4f129a7b621 2024-11-07T17:18:09,216 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/008e312220bc425c9ad67a251446fea8 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/008e312220bc425c9ad67a251446fea8 2024-11-07T17:18:09,216 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f5a2eb7e531b407486aa1f6db8db7571 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f5a2eb7e531b407486aa1f6db8db7571 2024-11-07T17:18:09,217 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/473362c2877c4475a11792b7c8fb5739 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/473362c2877c4475a11792b7c8fb5739 2024-11-07T17:18:09,218 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/bcebb6b0d0764c0083493bb63b37a69e to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/bcebb6b0d0764c0083493bb63b37a69e 2024-11-07T17:18:09,219 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/4c90df905003431ba10bed79983df6da to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/4c90df905003431ba10bed79983df6da 2024-11-07T17:18:09,219 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f269c679c82c4267a2d9e1f3a8df2d66 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/f269c679c82c4267a2d9e1f3a8df2d66 2024-11-07T17:18:09,220 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/a627c8dceb1a4193a9f06296762d1823 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/a627c8dceb1a4193a9f06296762d1823 2024-11-07T17:18:09,221 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/902bf823bd7e4245b6488e27dd4af7eb to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/902bf823bd7e4245b6488e27dd4af7eb 2024-11-07T17:18:09,221 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/d067c2eef74b4db2a7ce87abbebab290 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/d067c2eef74b4db2a7ce87abbebab290 2024-11-07T17:18:09,222 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/c16b2ee8f39e4fef8dd3a8e4df4a5441 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/c16b2ee8f39e4fef8dd3a8e4df4a5441 2024-11-07T17:18:09,223 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/8e30300db68c466ea387ee6d8fb436df to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/8e30300db68c466ea387ee6d8fb436df 2024-11-07T17:18:09,224 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/74a708fff9ac420cbf6d08d04450e578 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/74a708fff9ac420cbf6d08d04450e578 2024-11-07T17:18:09,224 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/6a7e8e23adf74e43a98e8bb7cb78e7d3 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/6a7e8e23adf74e43a98e8bb7cb78e7d3 2024-11-07T17:18:09,225 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/9eb7af338a6c42958483af397d331344 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/9eb7af338a6c42958483af397d331344 2024-11-07T17:18:09,226 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/4a803b017c454ab2a86114c79134ba79 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/4a803b017c454ab2a86114c79134ba79 2024-11-07T17:18:09,227 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/b4ecf60c17954074b74648d62321c643 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/b4ecf60c17954074b74648d62321c643 2024-11-07T17:18:09,227 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/377cc5a8191e43deb6c6dbabaed1490b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/377cc5a8191e43deb6c6dbabaed1490b 2024-11-07T17:18:09,228 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/14354eb6130e433dae7aef827ee34558, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/0624822464e145c093dd368c8875594a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/556b79d5b6b54ae8ba3ca5705d6fb008, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/5ee0f969dcc74b56b6b355c1a482b553, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/4a13c95153b044ef9a8f5e3a85297333, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/0809016635d44e539476c79eee312ecf, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/dbb933587efe4229ae015e64f19e892a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/40a7932d4521450bb9b97cddbabc489a, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/9823fb5298a04d4ab57d5c721b25f3ba, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/d0bbfe6cfb134e3bb3e1fdea3434f645, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/ffdca72539f4419a985d9d35f2aeb6db, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/8d5f86cca8b04cdcb7d2a40a4948f4e2, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/e0fdc6b169114e75b975cfc48b027470, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/185e3c4b20794a728e3f1ca3e24d35ce, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/9c74cfc3e9954fab9179770bc701584b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/be425bad5d9e44dd845188f819569038, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/db00120bb6f5449f8be864a142d47ea6, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/4a4741c8b1a44653b453203b45bee6ff, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/71735ccc22a14d53aeb79e36be631bce, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/3cb93e7a946b47ccb8f3766b27634c31, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/833e023a41d74e9a97774d52e4ccaf82, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/e82c4924579a45a884f9b4ca41f4cb8c, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/3737213cbed446bea3944c1b97976cde] to archive 2024-11-07T17:18:09,229 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T17:18:09,230 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/14354eb6130e433dae7aef827ee34558 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/14354eb6130e433dae7aef827ee34558 2024-11-07T17:18:09,230 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/0624822464e145c093dd368c8875594a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/0624822464e145c093dd368c8875594a 2024-11-07T17:18:09,231 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/556b79d5b6b54ae8ba3ca5705d6fb008 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/556b79d5b6b54ae8ba3ca5705d6fb008 2024-11-07T17:18:09,232 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/5ee0f969dcc74b56b6b355c1a482b553 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/5ee0f969dcc74b56b6b355c1a482b553 2024-11-07T17:18:09,233 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/4a13c95153b044ef9a8f5e3a85297333 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/4a13c95153b044ef9a8f5e3a85297333 2024-11-07T17:18:09,233 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/0809016635d44e539476c79eee312ecf to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/0809016635d44e539476c79eee312ecf 2024-11-07T17:18:09,234 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/dbb933587efe4229ae015e64f19e892a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/dbb933587efe4229ae015e64f19e892a 2024-11-07T17:18:09,235 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/40a7932d4521450bb9b97cddbabc489a to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/40a7932d4521450bb9b97cddbabc489a 2024-11-07T17:18:09,235 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/9823fb5298a04d4ab57d5c721b25f3ba to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/9823fb5298a04d4ab57d5c721b25f3ba 2024-11-07T17:18:09,236 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/d0bbfe6cfb134e3bb3e1fdea3434f645 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/d0bbfe6cfb134e3bb3e1fdea3434f645 2024-11-07T17:18:09,237 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/ffdca72539f4419a985d9d35f2aeb6db to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/ffdca72539f4419a985d9d35f2aeb6db 2024-11-07T17:18:09,238 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/8d5f86cca8b04cdcb7d2a40a4948f4e2 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/8d5f86cca8b04cdcb7d2a40a4948f4e2 2024-11-07T17:18:09,238 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/e0fdc6b169114e75b975cfc48b027470 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/e0fdc6b169114e75b975cfc48b027470 2024-11-07T17:18:09,239 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/185e3c4b20794a728e3f1ca3e24d35ce to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/185e3c4b20794a728e3f1ca3e24d35ce 2024-11-07T17:18:09,240 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/9c74cfc3e9954fab9179770bc701584b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/9c74cfc3e9954fab9179770bc701584b 2024-11-07T17:18:09,241 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/be425bad5d9e44dd845188f819569038 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/be425bad5d9e44dd845188f819569038 2024-11-07T17:18:09,241 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/db00120bb6f5449f8be864a142d47ea6 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/db00120bb6f5449f8be864a142d47ea6 2024-11-07T17:18:09,242 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/4a4741c8b1a44653b453203b45bee6ff to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/4a4741c8b1a44653b453203b45bee6ff 2024-11-07T17:18:09,243 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/71735ccc22a14d53aeb79e36be631bce to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/71735ccc22a14d53aeb79e36be631bce 2024-11-07T17:18:09,244 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/3cb93e7a946b47ccb8f3766b27634c31 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/3cb93e7a946b47ccb8f3766b27634c31 2024-11-07T17:18:09,244 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/833e023a41d74e9a97774d52e4ccaf82 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/833e023a41d74e9a97774d52e4ccaf82 2024-11-07T17:18:09,245 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/e82c4924579a45a884f9b4ca41f4cb8c to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/e82c4924579a45a884f9b4ca41f4cb8c 2024-11-07T17:18:09,246 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/3737213cbed446bea3944c1b97976cde to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/3737213cbed446bea3944c1b97976cde 2024-11-07T17:18:09,246 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/fb35eecf00dc4a6ca5e9dafa8cb879ae, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/e3ae45c727eb47429f8be17ce44f14db, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/9e119ff147674ce78f58c85b41e01830, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/1c1d4c798d7848f58243afea3220f743, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/878021521537459588daa803eea9fc58, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/79cee0cf58d74028a10a62979a548f4b, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/46b7504ccc094eb8b04ad64e44c924a9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/74ae5e58ff6545619cf18d3627ebdedc, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/1ecc5435db804f5db429eeb0befd865d, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/48d52437324c40a79170026651570af9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/3eecd007b2b54a1cac244ee9b57be917, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/cbaf4ab6af884fda9fcd6876640f63ab, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/dd685d6749de488c8a7402507ed0c549, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/41f09ef074644d5ba7797234b03a19a5, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/63ac26ef11ed4f849fc52362cbcbb6bb, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/39493b771f4e46678c1d329e97102385, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/6e90de869c334cc18e5a2516bce61e15, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/66c7b00c3caa4e0e85388248e4a2951e, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/a064f552ad45408a8002be8be35314a9, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/27e83efae9464112849ffef94904140f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/9ec54b0d04e54c3b91ea5192ed5057d7, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/2e465f233bf947eb94f732929585830f, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/c5f05dd696114e2cb34e4dbd3b09e2b8] to archive 2024-11-07T17:18:09,247 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T17:18:09,248 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/fb35eecf00dc4a6ca5e9dafa8cb879ae to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/fb35eecf00dc4a6ca5e9dafa8cb879ae 2024-11-07T17:18:09,249 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/e3ae45c727eb47429f8be17ce44f14db to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/e3ae45c727eb47429f8be17ce44f14db 2024-11-07T17:18:09,250 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/9e119ff147674ce78f58c85b41e01830 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/9e119ff147674ce78f58c85b41e01830 2024-11-07T17:18:09,250 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/1c1d4c798d7848f58243afea3220f743 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/1c1d4c798d7848f58243afea3220f743 2024-11-07T17:18:09,251 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/878021521537459588daa803eea9fc58 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/878021521537459588daa803eea9fc58 2024-11-07T17:18:09,252 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/79cee0cf58d74028a10a62979a548f4b to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/79cee0cf58d74028a10a62979a548f4b 2024-11-07T17:18:09,252 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/46b7504ccc094eb8b04ad64e44c924a9 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/46b7504ccc094eb8b04ad64e44c924a9 2024-11-07T17:18:09,253 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/74ae5e58ff6545619cf18d3627ebdedc to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/74ae5e58ff6545619cf18d3627ebdedc 2024-11-07T17:18:09,254 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/1ecc5435db804f5db429eeb0befd865d to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/1ecc5435db804f5db429eeb0befd865d 2024-11-07T17:18:09,255 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/48d52437324c40a79170026651570af9 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/48d52437324c40a79170026651570af9 2024-11-07T17:18:09,255 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/3eecd007b2b54a1cac244ee9b57be917 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/3eecd007b2b54a1cac244ee9b57be917 2024-11-07T17:18:09,256 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/cbaf4ab6af884fda9fcd6876640f63ab to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/cbaf4ab6af884fda9fcd6876640f63ab 2024-11-07T17:18:09,257 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/dd685d6749de488c8a7402507ed0c549 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/dd685d6749de488c8a7402507ed0c549 2024-11-07T17:18:09,258 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/41f09ef074644d5ba7797234b03a19a5 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/41f09ef074644d5ba7797234b03a19a5 2024-11-07T17:18:09,258 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/63ac26ef11ed4f849fc52362cbcbb6bb to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/63ac26ef11ed4f849fc52362cbcbb6bb 2024-11-07T17:18:09,259 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/39493b771f4e46678c1d329e97102385 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/39493b771f4e46678c1d329e97102385 2024-11-07T17:18:09,260 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/6e90de869c334cc18e5a2516bce61e15 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/6e90de869c334cc18e5a2516bce61e15 2024-11-07T17:18:09,260 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/66c7b00c3caa4e0e85388248e4a2951e to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/66c7b00c3caa4e0e85388248e4a2951e 2024-11-07T17:18:09,261 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/a064f552ad45408a8002be8be35314a9 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/a064f552ad45408a8002be8be35314a9 2024-11-07T17:18:09,262 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/27e83efae9464112849ffef94904140f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/27e83efae9464112849ffef94904140f 2024-11-07T17:18:09,263 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/9ec54b0d04e54c3b91ea5192ed5057d7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/9ec54b0d04e54c3b91ea5192ed5057d7 2024-11-07T17:18:09,263 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/2e465f233bf947eb94f732929585830f to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/2e465f233bf947eb94f732929585830f 2024-11-07T17:18:09,264 DEBUG [StoreCloser-TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/c5f05dd696114e2cb34e4dbd3b09e2b8 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/c5f05dd696114e2cb34e4dbd3b09e2b8 2024-11-07T17:18:09,267 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/recovered.edits/347.seqid, newMaxSeqId=347, maxSeqId=4 2024-11-07T17:18:09,268 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee. 
2024-11-07T17:18:09,268 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1635): Region close journal for 990807c0d50040fb7da6789c8418caee: 2024-11-07T17:18:09,269 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(170): Closed 990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,269 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=990807c0d50040fb7da6789c8418caee, regionState=CLOSED 2024-11-07T17:18:09,271 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-11-07T17:18:09,271 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; CloseRegionProcedure 990807c0d50040fb7da6789c8418caee, server=3a0fde618c86,37403,1730999712734 in 1.8560 sec 2024-11-07T17:18:09,271 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=181, resume processing ppid=180 2024-11-07T17:18:09,271 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=990807c0d50040fb7da6789c8418caee, UNASSIGN in 1.8580 sec 2024-11-07T17:18:09,273 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-11-07T17:18:09,273 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8600 sec 2024-11-07T17:18:09,273 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730999889273"}]},"ts":"1730999889273"} 2024-11-07T17:18:09,274 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-07T17:18:09,276 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-07T17:18:09,277 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8690 sec 2024-11-07T17:18:09,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-07T17:18:09,512 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-07T17:18:09,513 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-07T17:18:09,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:18:09,514 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=183, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:18:09,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-07T17:18:09,514 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=183, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:18:09,516 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,518 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C, FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/recovered.edits] 2024-11-07T17:18:09,520 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/920f18b32d2340169ff9bfa1dd86c9d8 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/920f18b32d2340169ff9bfa1dd86c9d8 2024-11-07T17:18:09,520 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/9889c234b9804811b1dd9d70b9c8433c to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/A/9889c234b9804811b1dd9d70b9c8433c 2024-11-07T17:18:09,522 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/54ae5996e8124795823566d58eeea8f7 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/54ae5996e8124795823566d58eeea8f7 2024-11-07T17:18:09,523 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/d7ca05acdde741ea80376b634c7ce415 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/B/d7ca05acdde741ea80376b634c7ce415 2024-11-07T17:18:09,524 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/2b583c6694704d44a8aafea26fded630 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/2b583c6694704d44a8aafea26fded630 
2024-11-07T17:18:09,525 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/e8303480234a44d5ad31918a63e45ca1 to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/C/e8303480234a44d5ad31918a63e45ca1 2024-11-07T17:18:09,527 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/recovered.edits/347.seqid to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee/recovered.edits/347.seqid 2024-11-07T17:18:09,528 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/default/TestAcidGuarantees/990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,528 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-07T17:18:09,528 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-07T17:18:09,528 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-07T17:18:09,530 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411071931fe7034e748fa97449cc7532e516d_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411071931fe7034e748fa97449cc7532e516d_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,531 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411071de629e9023e47db836d5e414752d875_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411071de629e9023e47db836d5e414752d875_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,532 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072740c413870f4598bd779a3418e5eef8_990807c0d50040fb7da6789c8418caee to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072740c413870f4598bd779a3418e5eef8_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,532 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072d1aae25cded405191eb7cae00adf2be_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072d1aae25cded405191eb7cae00adf2be_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,533 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072ed8995b615b4eb1a4b065d01243f6d1_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411072ed8995b615b4eb1a4b065d01243f6d1_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,534 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107351d70c94b0f46c2a1395d9fdec7389f_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107351d70c94b0f46c2a1395d9fdec7389f_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,535 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411073c5e157363b24ba383dbda9dc5642f88_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411073c5e157363b24ba383dbda9dc5642f88_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,536 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075c2aeec32ea047079cba1d21049e7eb0_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075c2aeec32ea047079cba1d21049e7eb0_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,536 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075dddb83e9efc42d8b624f48a7e2c70f1_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075dddb83e9efc42d8b624f48a7e2c70f1_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,537 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075eb844a873634c5b8a42ba159f5eb9fe_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075eb844a873634c5b8a42ba159f5eb9fe_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,538 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411077bff5f59e3ca4128bfbf4229d60810ba_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411077bff5f59e3ca4128bfbf4229d60810ba_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,539 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110790f3b2a3efc44e67b2e0d27928eee36b_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110790f3b2a3efc44e67b2e0d27928eee36b_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,540 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411079a12969a78dd46108647af2d1d7e72e3_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411079a12969a78dd46108647af2d1d7e72e3_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,540 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107a79705568db94a9686c77afe49092755_990807c0d50040fb7da6789c8418caee to 
hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107a79705568db94a9686c77afe49092755_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,541 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107b5578015c49f4d83b0eb2b956ff0b0cb_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107b5578015c49f4d83b0eb2b956ff0b0cb_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,542 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c99c46c854c44bdf95280f4edd5675eb_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c99c46c854c44bdf95280f4edd5675eb_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,543 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ed77b825af7d4edfa699ada3b7f2ebe6_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ed77b825af7d4edfa699ada3b7f2ebe6_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,543 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fd1ae7baee2a42ca9996f95dd597811c_990807c0d50040fb7da6789c8418caee to hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fd1ae7baee2a42ca9996f95dd597811c_990807c0d50040fb7da6789c8418caee 2024-11-07T17:18:09,544 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-07T17:18:09,545 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=183, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:18:09,547 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-07T17:18:09,549 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
2024-11-07T17:18:09,549 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=183, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:18:09,549 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-07T17:18:09,550 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730999889549"}]},"ts":"9223372036854775807"} 2024-11-07T17:18:09,551 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-07T17:18:09,551 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 990807c0d50040fb7da6789c8418caee, NAME => 'TestAcidGuarantees,,1730999860984.990807c0d50040fb7da6789c8418caee.', STARTKEY => '', ENDKEY => ''}] 2024-11-07T17:18:09,551 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-07T17:18:09,551 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730999889551"}]},"ts":"9223372036854775807"} 2024-11-07T17:18:09,552 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-07T17:18:09,554 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=183, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T17:18:09,554 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 41 msec 2024-11-07T17:18:09,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35383 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-07T17:18:09,615 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-11-07T17:18:09,624 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=240 (was 239) - Thread LEAK? -, OpenFileDescriptor=457 (was 451) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=441 (was 496), ProcessCount=11 (was 11), AvailableMemoryMB=3402 (was 3542) 2024-11-07T17:18:09,624 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-07T17:18:09,624 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-07T17:18:09,625 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e83c466 to 127.0.0.1:64938 2024-11-07T17:18:09,625 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:09,625 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T17:18:09,625 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=282474894, stopped=false 2024-11-07T17:18:09,625 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=3a0fde618c86,35383,1730999712016 2024-11-07T17:18:09,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T17:18:09,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:18:09,627 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-07T17:18:09,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T17:18:09,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:18:09,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:09,627 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '3a0fde618c86,37403,1730999712734' ***** 2024-11-07T17:18:09,627 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-07T17:18:09,627 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T17:18:09,627 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T17:18:09,628 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T17:18:09,628 INFO [RS:0;3a0fde618c86:37403 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T17:18:09,628 INFO [RS:0;3a0fde618c86:37403 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-07T17:18:09,628 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-07T17:18:09,628 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(3579): Received CLOSE for 3c877d3c3f531453d06f6bdf82c5263b 2024-11-07T17:18:09,628 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1224): stopping server 3a0fde618c86,37403,1730999712734 2024-11-07T17:18:09,628 DEBUG [RS:0;3a0fde618c86:37403 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:09,628 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T17:18:09,628 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T17:18:09,629 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-07T17:18:09,629 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-07T17:18:09,629 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 3c877d3c3f531453d06f6bdf82c5263b, disabling compactions & flushes 2024-11-07T17:18:09,629 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b. 2024-11-07T17:18:09,629 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b. 2024-11-07T17:18:09,629 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b. after waiting 0 ms 2024-11-07T17:18:09,629 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b. 
2024-11-07T17:18:09,629 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 3c877d3c3f531453d06f6bdf82c5263b 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-07T17:18:09,629 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-07T17:18:09,629 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 3c877d3c3f531453d06f6bdf82c5263b=hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b.} 2024-11-07T17:18:09,629 DEBUG [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-07T17:18:09,629 INFO [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-07T17:18:09,629 DEBUG [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-07T17:18:09,629 DEBUG [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T17:18:09,629 DEBUG [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T17:18:09,629 INFO [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-07T17:18:09,629 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3c877d3c3f531453d06f6bdf82c5263b 2024-11-07T17:18:09,644 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/namespace/3c877d3c3f531453d06f6bdf82c5263b/.tmp/info/949517533b8c492396d8472f7fd80c6c is 45, key is default/info:d/1730999718012/Put/seqid=0 2024-11-07T17:18:09,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742499_1675 (size=5037) 2024-11-07T17:18:09,649 DEBUG [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/.tmp/info/cce3ba5478734bd2add8fe10f111a71f is 143, key is hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b./info:regioninfo/1730999717891/Put/seqid=0 2024-11-07T17:18:09,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742500_1676 (size=7725) 2024-11-07T17:18:09,718 INFO [regionserver/3a0fde618c86:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T17:18:09,830 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3c877d3c3f531453d06f6bdf82c5263b 2024-11-07T17:18:10,030 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3c877d3c3f531453d06f6bdf82c5263b 2024-11-07T17:18:10,048 INFO 
[RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/namespace/3c877d3c3f531453d06f6bdf82c5263b/.tmp/info/949517533b8c492396d8472f7fd80c6c 2024-11-07T17:18:10,051 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/namespace/3c877d3c3f531453d06f6bdf82c5263b/.tmp/info/949517533b8c492396d8472f7fd80c6c as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/namespace/3c877d3c3f531453d06f6bdf82c5263b/info/949517533b8c492396d8472f7fd80c6c 2024-11-07T17:18:10,052 INFO [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/.tmp/info/cce3ba5478734bd2add8fe10f111a71f 2024-11-07T17:18:10,054 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/namespace/3c877d3c3f531453d06f6bdf82c5263b/info/949517533b8c492396d8472f7fd80c6c, entries=2, sequenceid=6, filesize=4.9 K 2024-11-07T17:18:10,054 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 3c877d3c3f531453d06f6bdf82c5263b in 425ms, sequenceid=6, compaction requested=false 2024-11-07T17:18:10,057 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/namespace/3c877d3c3f531453d06f6bdf82c5263b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-07T17:18:10,057 INFO [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b. 2024-11-07T17:18:10,057 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 3c877d3c3f531453d06f6bdf82c5263b: 2024-11-07T17:18:10,057 DEBUG [RS_CLOSE_REGION-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1730999716663.3c877d3c3f531453d06f6bdf82c5263b. 
2024-11-07T17:18:10,069 DEBUG [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/.tmp/rep_barrier/0661a3da191c4a108c62f558cc7ce856 is 102, key is TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906./rep_barrier:/1730999742868/DeleteFamily/seqid=0 2024-11-07T17:18:10,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742501_1677 (size=6025) 2024-11-07T17:18:10,230 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-07T17:18:10,430 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-07T17:18:10,456 INFO [regionserver/3a0fde618c86:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-07T17:18:10,457 INFO [regionserver/3a0fde618c86:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-07T17:18:10,472 INFO [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/.tmp/rep_barrier/0661a3da191c4a108c62f558cc7ce856 2024-11-07T17:18:10,489 DEBUG [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/.tmp/table/f0a12f41269a4fbc8b208dfb2af5fed7 is 96, key is TestAcidGuarantees,,1730999718189.852ea2728c497a9e191625c6cb13c906./table:/1730999742868/DeleteFamily/seqid=0 2024-11-07T17:18:10,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742502_1678 (size=5942) 2024-11-07T17:18:10,631 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-07T17:18:10,631 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-07T17:18:10,631 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-07T17:18:10,831 DEBUG [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-07T17:18:10,893 INFO [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/.tmp/table/f0a12f41269a4fbc8b208dfb2af5fed7 2024-11-07T17:18:10,896 DEBUG [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/.tmp/info/cce3ba5478734bd2add8fe10f111a71f as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/info/cce3ba5478734bd2add8fe10f111a71f 2024-11-07T17:18:10,898 INFO [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/info/cce3ba5478734bd2add8fe10f111a71f, entries=22, sequenceid=93, filesize=7.5 K 2024-11-07T17:18:10,899 DEBUG [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/.tmp/rep_barrier/0661a3da191c4a108c62f558cc7ce856 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/rep_barrier/0661a3da191c4a108c62f558cc7ce856 2024-11-07T17:18:10,901 INFO [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/rep_barrier/0661a3da191c4a108c62f558cc7ce856, entries=6, sequenceid=93, filesize=5.9 K 2024-11-07T17:18:10,902 DEBUG [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/.tmp/table/f0a12f41269a4fbc8b208dfb2af5fed7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/table/f0a12f41269a4fbc8b208dfb2af5fed7 2024-11-07T17:18:10,904 INFO [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/table/f0a12f41269a4fbc8b208dfb2af5fed7, entries=9, sequenceid=93, filesize=5.8 K 2024-11-07T17:18:10,905 INFO [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1276ms, sequenceid=93, compaction requested=false 2024-11-07T17:18:10,908 DEBUG [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-07T17:18:10,908 DEBUG [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T17:18:10,908 INFO [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-07T17:18:10,908 DEBUG [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-07T17:18:10,909 DEBUG [RS_CLOSE_META-regionserver/3a0fde618c86:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T17:18:11,031 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1250): stopping server 3a0fde618c86,37403,1730999712734; all regions closed. 
2024-11-07T17:18:11,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741834_1010 (size=26050) 2024-11-07T17:18:11,037 DEBUG [RS:0;3a0fde618c86:37403 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/oldWALs 2024-11-07T17:18:11,037 INFO [RS:0;3a0fde618c86:37403 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 3a0fde618c86%2C37403%2C1730999712734.meta:.meta(num 1730999716423) 2024-11-07T17:18:11,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741832_1008 (size=15659373) 2024-11-07T17:18:11,042 DEBUG [RS:0;3a0fde618c86:37403 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/oldWALs 2024-11-07T17:18:11,042 INFO [RS:0;3a0fde618c86:37403 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 3a0fde618c86%2C37403%2C1730999712734:(num 1730999715522) 2024-11-07T17:18:11,042 DEBUG [RS:0;3a0fde618c86:37403 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:11,042 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T17:18:11,042 INFO [RS:0;3a0fde618c86:37403 {}] hbase.ChoreService(370): Chore service for: regionserver/3a0fde618c86:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-07T17:18:11,043 INFO [regionserver/3a0fde618c86:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-07T17:18:11,043 INFO [RS:0;3a0fde618c86:37403 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:37403 2024-11-07T17:18:11,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3a0fde618c86,37403,1730999712734 2024-11-07T17:18:11,047 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$358/0x00007f6fe88f34e8@29df2dc9 rejected from java.util.concurrent.ThreadPoolExecutor@6cddb696[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 15] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-07T17:18:11,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T17:18:11,048 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3a0fde618c86,37403,1730999712734] 2024-11-07T17:18:11,048 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 3a0fde618c86,37403,1730999712734; numProcessing=1 2024-11-07T17:18:11,049 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/3a0fde618c86,37403,1730999712734 already deleted, retry=false 2024-11-07T17:18:11,049 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 3a0fde618c86,37403,1730999712734 expired; onlineServers=0 2024-11-07T17:18:11,049 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '3a0fde618c86,35383,1730999712016' ***** 2024-11-07T17:18:11,049 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T17:18:11,049 DEBUG [M:0;3a0fde618c86:35383 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@679d3aa6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3a0fde618c86/172.17.0.2:0 2024-11-07T17:18:11,049 INFO [M:0;3a0fde618c86:35383 {}] regionserver.HRegionServer(1224): stopping server 3a0fde618c86,35383,1730999712016 2024-11-07T17:18:11,049 INFO [M:0;3a0fde618c86:35383 {}] regionserver.HRegionServer(1250): stopping server 3a0fde618c86,35383,1730999712016; all regions closed. 2024-11-07T17:18:11,049 DEBUG [M:0;3a0fde618c86:35383 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T17:18:11,049 DEBUG [M:0;3a0fde618c86:35383 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T17:18:11,050 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-07T17:18:11,050 DEBUG [M:0;3a0fde618c86:35383 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-07T17:18:11,050 DEBUG [master/3a0fde618c86:0:becomeActiveMaster-HFileCleaner.small.0-1730999715192 {}] cleaner.HFileCleaner(306): Exit Thread[master/3a0fde618c86:0:becomeActiveMaster-HFileCleaner.small.0-1730999715192,5,FailOnTimeoutGroup] 2024-11-07T17:18:11,050 DEBUG [master/3a0fde618c86:0:becomeActiveMaster-HFileCleaner.large.0-1730999715189 {}] cleaner.HFileCleaner(306): Exit Thread[master/3a0fde618c86:0:becomeActiveMaster-HFileCleaner.large.0-1730999715189,5,FailOnTimeoutGroup] 2024-11-07T17:18:11,050 INFO [M:0;3a0fde618c86:35383 {}] hbase.ChoreService(370): Chore service for: master/3a0fde618c86:0 had [] on shutdown 2024-11-07T17:18:11,050 DEBUG [M:0;3a0fde618c86:35383 {}] master.HMaster(1733): Stopping service threads 2024-11-07T17:18:11,050 INFO [M:0;3a0fde618c86:35383 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-07T17:18:11,050 ERROR [M:0;3a0fde618c86:35383 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:39903 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:39903,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-07T17:18:11,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-07T17:18:11,051 INFO [M:0;3a0fde618c86:35383 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-07T17:18:11,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T17:18:11,051 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-07T17:18:11,051 DEBUG [M:0;3a0fde618c86:35383 {}] zookeeper.ZKUtil(347): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-07T17:18:11,051 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T17:18:11,051 WARN [M:0;3a0fde618c86:35383 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-07T17:18:11,051 INFO [M:0;3a0fde618c86:35383 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-07T17:18:11,051 INFO [M:0;3a0fde618c86:35383 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-07T17:18:11,052 DEBUG [M:0;3a0fde618c86:35383 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T17:18:11,052 INFO [M:0;3a0fde618c86:35383 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T17:18:11,052 DEBUG [M:0;3a0fde618c86:35383 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T17:18:11,052 DEBUG [M:0;3a0fde618c86:35383 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T17:18:11,052 DEBUG [M:0;3a0fde618c86:35383 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T17:18:11,052 INFO [M:0;3a0fde618c86:35383 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=757.72 KB heapSize=930.31 KB 2024-11-07T17:18:11,067 DEBUG [M:0;3a0fde618c86:35383 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e4c1ffa4d5c84787bdc8665952041e13 is 82, key is hbase:meta,,1/info:regioninfo/1730999716558/Put/seqid=0 2024-11-07T17:18:11,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742503_1679 (size=5672) 2024-11-07T17:18:11,142 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T17:18:11,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T17:18:11,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37403-0x10183baeb4b0001, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T17:18:11,148 INFO [RS:0;3a0fde618c86:37403 {}] regionserver.HRegionServer(1307): Exiting; stopping=3a0fde618c86,37403,1730999712734; zookeeper connection closed. 
2024-11-07T17:18:11,148 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3ee4e0e2 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3ee4e0e2 2024-11-07T17:18:11,149 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-07T17:18:11,470 INFO [M:0;3a0fde618c86:35383 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2117 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e4c1ffa4d5c84787bdc8665952041e13 2024-11-07T17:18:11,491 DEBUG [M:0;3a0fde618c86:35383 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d2a4ef0c392a453da8da5e906ae960a7 is 2279, key is \x00\x00\x00\x00\x00\x00\x00\x9E/proc:d/1730999863999/Put/seqid=0 2024-11-07T17:18:11,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742504_1680 (size=45793) 2024-11-07T17:18:11,894 INFO [M:0;3a0fde618c86:35383 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=757.17 KB at sequenceid=2117 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d2a4ef0c392a453da8da5e906ae960a7 2024-11-07T17:18:11,897 INFO [M:0;3a0fde618c86:35383 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d2a4ef0c392a453da8da5e906ae960a7 2024-11-07T17:18:11,912 DEBUG [M:0;3a0fde618c86:35383 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/323a056b2c664f5f85195d2c2f1ed9b8 is 69, key is 3a0fde618c86,37403,1730999712734/rs:state/1730999715303/Put/seqid=0 2024-11-07T17:18:11,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073742505_1681 (size=5156) 2024-11-07T17:18:12,315 INFO [M:0;3a0fde618c86:35383 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2117 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/323a056b2c664f5f85195d2c2f1ed9b8 2024-11-07T17:18:12,318 DEBUG [M:0;3a0fde618c86:35383 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e4c1ffa4d5c84787bdc8665952041e13 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e4c1ffa4d5c84787bdc8665952041e13 2024-11-07T17:18:12,321 INFO [M:0;3a0fde618c86:35383 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e4c1ffa4d5c84787bdc8665952041e13, entries=8, sequenceid=2117, filesize=5.5 K 
2024-11-07T17:18:12,322 DEBUG [M:0;3a0fde618c86:35383 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d2a4ef0c392a453da8da5e906ae960a7 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d2a4ef0c392a453da8da5e906ae960a7 2024-11-07T17:18:12,324 INFO [M:0;3a0fde618c86:35383 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d2a4ef0c392a453da8da5e906ae960a7 2024-11-07T17:18:12,324 INFO [M:0;3a0fde618c86:35383 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d2a4ef0c392a453da8da5e906ae960a7, entries=183, sequenceid=2117, filesize=44.7 K 2024-11-07T17:18:12,324 DEBUG [M:0;3a0fde618c86:35383 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/323a056b2c664f5f85195d2c2f1ed9b8 as hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/323a056b2c664f5f85195d2c2f1ed9b8 2024-11-07T17:18:12,326 INFO [M:0;3a0fde618c86:35383 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39903/user/jenkins/test-data/e7f04787-90d3-15f1-131f-f9814aec3e17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/323a056b2c664f5f85195d2c2f1ed9b8, entries=1, sequenceid=2117, filesize=5.0 K 2024-11-07T17:18:12,327 INFO [M:0;3a0fde618c86:35383 {}] regionserver.HRegion(3040): Finished flush of dataSize ~757.72 KB/775907, heapSize ~930.02 KB/952336, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1275ms, sequenceid=2117, compaction requested=false 2024-11-07T17:18:12,328 INFO [M:0;3a0fde618c86:35383 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T17:18:12,328 DEBUG [M:0;3a0fde618c86:35383 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-07T17:18:12,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741830_1006 (size=914767) 2024-11-07T17:18:12,331 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-07T17:18:12,331 INFO [M:0;3a0fde618c86:35383 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
2024-11-07T17:18:12,331 INFO [M:0;3a0fde618c86:35383 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35383 2024-11-07T17:18:12,332 DEBUG [M:0;3a0fde618c86:35383 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/3a0fde618c86,35383,1730999712016 already deleted, retry=false 2024-11-07T17:18:12,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T17:18:12,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35383-0x10183baeb4b0000, quorum=127.0.0.1:64938, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T17:18:12,434 INFO [M:0;3a0fde618c86:35383 {}] regionserver.HRegionServer(1307): Exiting; stopping=3a0fde618c86,35383,1730999712016; zookeeper connection closed. 2024-11-07T17:18:12,438 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T17:18:12,441 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T17:18:12,441 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T17:18:12,441 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T17:18:12,441 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/hadoop.log.dir/,STOPPED} 2024-11-07T17:18:12,444 WARN [BP-2016808812-172.17.0.2-1730999709066 heartbeating to localhost/127.0.0.1:39903 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T17:18:12,444 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T17:18:12,444 WARN [BP-2016808812-172.17.0.2-1730999709066 heartbeating to localhost/127.0.0.1:39903 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2016808812-172.17.0.2-1730999709066 (Datanode Uuid 5e212fae-8090-491e-bac2-98c16dabef77) service to localhost/127.0.0.1:39903 2024-11-07T17:18:12,444 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T17:18:12,446 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/cluster_5c1c1720-808f-e015-ddc1-fd2a6dabdfc3/dfs/data/data1/current/BP-2016808812-172.17.0.2-1730999709066 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T17:18:12,447 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/cluster_5c1c1720-808f-e015-ddc1-fd2a6dabdfc3/dfs/data/data2/current/BP-2016808812-172.17.0.2-1730999709066 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T17:18:12,447 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T17:18:12,454 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T17:18:12,455 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T17:18:12,455 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T17:18:12,455 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T17:18:12,455 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4d0713e7-a4e8-6ed1-4ecb-647bd73e1192/hadoop.log.dir/,STOPPED} 2024-11-07T17:18:12,473 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-07T17:18:12,501 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T17:18:12,501 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-07T17:18:12,501 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-07T17:18:12,502 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-07T17:18:12,502 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-07T17:18:12,612 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down